From ca61fad47cf624b253d0b29ac9ebba8b3e218f69 Mon Sep 17 00:00:00 2001 From: Joel Takvorian Date: Wed, 31 May 2023 08:42:58 +0200 Subject: [PATCH] NETOBSERV-773 Copy certificates across namespaces (#326) * NETOBSERV-773 Copy certificates across namespaces - Any certificate (secret/cm) can now be referenced from any namespace, which makes the operator watch the original and copy it to a target namespace. It allows not only to deploy Loki (or Kafka) in any namespace, but also fixes the issue of ebpf pods not having access to kafka CA/key without manual intervention And quite a big refactoring: - New "watchers" and "volumes" packages - Creation of volumes is now using a builder-style approach allowing to incrementally add volumes and get at the same time their path. It avoids having discrepancies between mounted volumes and their related path reference. - Watching certificates (or any CM/secret) now uses digest hash of content instead of metadata, to avoid triggering pod restarts when a cm/secret was changed despite its content remaining the same - Some things are moved around to make internal APIs easier to use, fewer parameters in functions, etc. 
- New extensive integration tests on certificates management Fix failing tests * Watch loki statusTls certs * Remove flaky test (they're already run in flowcollector_controller_test) --- api/v1alpha1/flowcollector_types.go | 14 +- api/v1alpha1/zz_generated.conversion.go | 6 +- api/v1beta1/flowcollector_types.go | 14 +- .../flows.netobserv.io_flowcollectors.yaml | 134 ++++++ ...observ-operator.clusterserviceversion.yaml | 2 +- .../flows.netobserv.io_flowcollectors.yaml | 134 ++++++ config/rbac/role.yaml | 2 +- .../consoleplugin/consoleplugin_objects.go | 41 +- .../consoleplugin/consoleplugin_reconciler.go | 85 ++-- controllers/constants/constants.go | 16 +- controllers/ebpf/agent_controller.go | 105 ++-- .../ebpf/internal/permissions/permissions.go | 59 +-- controllers/flowcollector_controller.go | 80 +-- ...wcollector_controller_certificates_test.go | 454 ++++++++++++++++++ .../flowcollector_controller_iso_test.go | 68 +-- controllers/flowcollector_controller_test.go | 22 +- .../flowlogspipeline/flp_common_objects.go | 114 ++--- .../flowlogspipeline/flp_ingest_objects.go | 13 +- .../flowlogspipeline/flp_ingest_reconciler.go | 64 +-- .../flowlogspipeline/flp_monolith_objects.go | 11 +- .../flp_monolith_reconciler.go | 76 +-- .../flowlogspipeline/flp_reconciler.go | 66 +-- controllers/flowlogspipeline/flp_test.go | 155 +++--- .../flowlogspipeline/flp_transfo_objects.go | 15 +- .../flp_transfo_reconciler.go | 84 ++-- controllers/ovs/flowsconfig_cno_reconciler.go | 31 +- .../ovs/flowsconfig_ovnk_reconciler.go | 19 +- .../{client_helper.go => common.go} | 119 +++-- .../reconcilers/namespaced_objects_manager.go | 37 +- controllers/suite_test.go | 21 +- docs/FlowCollector.md | 144 ++++++ pkg/helper/certificates.go | 116 ----- pkg/helper/client_helper.go | 80 +++ pkg/helper/flowcollector.go | 8 + pkg/helper/tokens.go | 33 -- pkg/test/kube_mock.go | 87 ++++ pkg/volumes/builder.go | 131 +++++ pkg/watchers/certificates_watcher.go | 117 ----- 
pkg/watchers/certificates_watcher_test.go | 123 ----- pkg/watchers/object_ref.go | 35 ++ pkg/watchers/watchables.go | 82 ++++ pkg/watchers/watcher.go | 157 ++++++ pkg/watchers/watcher_test.go | 209 ++++++++ 43 files changed, 2278 insertions(+), 1105 deletions(-) create mode 100644 controllers/flowcollector_controller_certificates_test.go rename controllers/reconcilers/{client_helper.go => common.go} (60%) delete mode 100644 pkg/helper/certificates.go create mode 100644 pkg/helper/client_helper.go delete mode 100644 pkg/helper/tokens.go create mode 100644 pkg/test/kube_mock.go create mode 100644 pkg/volumes/builder.go delete mode 100644 pkg/watchers/certificates_watcher.go delete mode 100644 pkg/watchers/certificates_watcher_test.go create mode 100644 pkg/watchers/object_ref.go create mode 100644 pkg/watchers/watchables.go create mode 100644 pkg/watchers/watcher.go create mode 100644 pkg/watchers/watcher_test.go diff --git a/api/v1alpha1/flowcollector_types.go b/api/v1alpha1/flowcollector_types.go index 587d024c8..36613224c 100644 --- a/api/v1alpha1/flowcollector_types.go +++ b/api/v1alpha1/flowcollector_types.go @@ -573,15 +573,17 @@ type OVNKubernetesConfig struct { ContainerName string `json:"containerName,omitempty"` } +type MountableType string + const ( - CertRefTypeSecret = "secret" - CertRefTypeConfigMap = "configmap" + CertRefTypeSecret MountableType = "secret" + CertRefTypeConfigMap MountableType = "configmap" ) type CertificateReference struct { //+kubebuilder:validation:Enum=configmap;secret // type for the certificate reference: "configmap" or "secret" - Type string `json:"type,omitempty"` + Type MountableType `json:"type,omitempty"` // name of the config map or secret containing certificates Name string `json:"name,omitempty"` @@ -592,6 +594,12 @@ type CertificateReference struct { // certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. 
// +optional CertKey string `json:"certKey,omitempty"` + + // namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. + // If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. + // +optional + //+kubebuilder:default:="" + Namespace string `json:"namespace,omitempty"` } // ClientTLS defines TLS client configuration diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index a762dff9e..13c8410bd 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -272,10 +272,11 @@ func RegisterConversions(s *runtime.Scheme) error { } func autoConvert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(in *CertificateReference, out *v1beta1.CertificateReference, s conversion.Scope) error { - out.Type = in.Type + out.Type = v1beta1.MountableType(in.Type) out.Name = in.Name out.CertFile = in.CertFile out.CertKey = in.CertKey + out.Namespace = in.Namespace return nil } @@ -285,8 +286,9 @@ func Convert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(in *C } func autoConvert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta1.CertificateReference, out *CertificateReference, s conversion.Scope) error { - out.Type = in.Type + out.Type = MountableType(in.Type) out.Name = in.Name + out.Namespace = in.Namespace out.CertFile = in.CertFile out.CertKey = in.CertKey return nil diff --git a/api/v1beta1/flowcollector_types.go b/api/v1beta1/flowcollector_types.go index 829c805d6..2b81dc59a 100644 --- a/api/v1beta1/flowcollector_types.go +++ b/api/v1beta1/flowcollector_types.go @@ -641,19 +641,27 @@ type OVNKubernetesConfig struct { ContainerName string `json:"containerName,omitempty"` } +type MountableType string + const ( - CertRefTypeSecret = "secret" - CertRefTypeConfigMap = "configmap" + RefTypeSecret MountableType = "secret" + 
RefTypeConfigMap MountableType = "configmap" ) type CertificateReference struct { //+kubebuilder:validation:Enum=configmap;secret // type for the certificate reference: "configmap" or "secret" - Type string `json:"type,omitempty"` + Type MountableType `json:"type,omitempty"` // name of the config map or secret containing certificates Name string `json:"name,omitempty"` + // namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. + // If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. + // +optional + //+kubebuilder:default:="" + Namespace string `json:"namespace,omitempty"` + // certFile defines the path to the certificate file name within the config map or secret CertFile string `json:"certFile,omitempty"` diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index e1e871613..00ed59e61 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -1025,6 +1025,15 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -1061,6 +1070,15 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. 
If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -1121,6 +1139,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -1157,6 +1183,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -1275,6 +1309,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -1311,6 +1353,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. 
If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -1992,6 +2042,15 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes + same namespace as where NetObserv is deployed. + If the namespace is different, the config map + or the secret will be copied so that it can + be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3172,6 +3231,15 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3208,6 +3276,15 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. 
+ type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3269,6 +3346,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3305,6 +3390,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3404,6 +3497,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3440,6 +3541,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. 
If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3489,6 +3598,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3525,6 +3642,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -4246,6 +4371,15 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes + same namespace as where NetObserv is deployed. + If the namespace is different, the config map + or the secret will be copied so that it can + be mounted as required. 
+ type: string type: description: 'type for the certificate reference: "configmap" or "secret"' diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index fe6afc1c5..7f0a0c776 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -522,6 +522,7 @@ spec: resources: - configmaps - namespaces + - secrets - serviceaccounts - services verbs: @@ -536,7 +537,6 @@ spec: - "" resources: - endpoints - - secrets verbs: - get - list diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index f36155a8a..1d01e2488 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -1012,6 +1012,15 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -1048,6 +1057,15 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. 
+ type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -1108,6 +1126,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -1144,6 +1170,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -1262,6 +1296,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -1298,6 +1340,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. 
If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -1979,6 +2029,15 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes + same namespace as where NetObserv is deployed. + If the namespace is different, the config map + or the secret will be copied so that it can + be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3159,6 +3218,15 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3195,6 +3263,15 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3256,6 +3333,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. 
If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3292,6 +3377,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3391,6 +3484,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3427,6 +3528,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. 
+ type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3476,6 +3585,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -3512,6 +3629,14 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. If omitted, assumes same namespace as + where NetObserv is deployed. If the namespace is different, + the config map or the secret will be copied so that + it can be mounted as required. + type: string type: description: 'type for the certificate reference: "configmap" or "secret"' @@ -4233,6 +4358,15 @@ spec: description: name of the config map or secret containing certificates type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes + same namespace as where NetObserv is deployed. + If the namespace is different, the config map + or the secret will be copied so that it can + be mounted as required. 
+ type: string type: description: 'type for the certificate reference: "configmap" or "secret"' diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index d61fcc9de..000b593c2 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -74,6 +74,7 @@ rules: resources: - configmaps - namespaces + - secrets - serviceaccounts - services verbs: @@ -88,7 +89,6 @@ rules: - "" resources: - endpoints - - secrets verbs: - get - list diff --git a/controllers/consoleplugin/consoleplugin_objects.go b/controllers/consoleplugin/consoleplugin_objects.go index 8ad2d9639..d8bf6531e 100644 --- a/controllers/consoleplugin/consoleplugin_objects.go +++ b/controllers/consoleplugin/consoleplugin_objects.go @@ -20,6 +20,7 @@ import ( flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/pkg/helper" + "github.com/netobserv/network-observability-operator/pkg/volumes" ) const secretName = "console-serving-cert" @@ -30,9 +31,6 @@ const configMapName = "console-plugin-config" const configFile = "config.yaml" const configVolume = "config-volume" const configPath = "/opt/app-root/" -const lokiCerts = "loki-certs" -const lokiStatusCerts = "loki-status-certs" -const tokensPath = "/var/run/secrets/tokens/" const metricsSvcName = constants.PluginName + "-metrics" const metricsPort = 9002 const metricsPortName = "metrics" @@ -43,6 +41,7 @@ type builder struct { selector map[string]string desired *flowslatest.FlowCollectorSpec imageName string + volumes volumes.Builder } func newBuilder(ns, imageName string, desired *flowslatest.FlowCollectorSpec) builder { @@ -161,14 +160,7 @@ func (b *builder) deployment(cmDigest string) *appsv1.Deployment { } } -func tokenPath(desiredLoki *flowslatest.FlowCollectorLoki) string { - if helper.LokiUseHostToken(desiredLoki) { - return tokensPath + constants.PluginName - } - return "" -} - -func 
buildArgs(desired *flowslatest.FlowCollectorSpec) []string { +func (b *builder) buildArgs(desired *flowslatest.FlowCollectorSpec) []string { querierURL := querierURL(&desired.Loki) statusURL := statusURL(&desired.Loki) @@ -200,7 +192,7 @@ func buildArgs(desired *flowslatest.FlowCollectorSpec) []string { if desired.Loki.TLS.InsecureSkipVerify { args = append(args, "-loki-skip-tls") } else { - caPath := helper.GetCACertPath(&desired.Loki.TLS, lokiCerts) + caPath := b.volumes.AddCACertificate(&desired.Loki.TLS, "loki-certs") if caPath != "" { args = append(args, "-loki-ca-path", caPath) } @@ -212,12 +204,10 @@ func buildArgs(desired *flowslatest.FlowCollectorSpec) []string { if statusTLS.InsecureSkipVerify { args = append(args, "-loki-status-skip-tls") } else { - statusCaPath := helper.GetCACertPath(&statusTLS, lokiStatusCerts) + statusCaPath, userCertPath, userKeyPath := b.volumes.AddMutualTLSCertificates(&statusTLS, "loki-status-certs") if statusCaPath != "" { args = append(args, "-loki-status-ca-path", statusCaPath) } - userCertPath := helper.GetUserCertPath(&statusTLS, lokiStatusCerts) - userKeyPath := helper.GetUserKeyPath(&statusTLS, lokiStatusCerts) if userCertPath != "" && userKeyPath != "" { args = append(args, "-loki-status-user-cert-path", userCertPath) args = append(args, "-loki-status-user-key-path", userKeyPath) @@ -226,7 +216,8 @@ func buildArgs(desired *flowslatest.FlowCollectorSpec) []string { } if helper.LokiUseHostToken(&desired.Loki) { - args = append(args, "-loki-token-path", tokenPath(&desired.Loki)) + tokenPath := b.volumes.AddToken(constants.PluginName) + args = append(args, "-loki-token-path", tokenPath) } return args } @@ -262,19 +253,7 @@ func (b *builder) podTemplate(cmDigest string) *corev1.PodTemplateSpec { }, } - args := buildArgs(b.desired) - if b.desired != nil && b.desired.Loki.TLS.Enable && !b.desired.Loki.TLS.InsecureSkipVerify { - volumes, volumeMounts = helper.AppendCertVolumes(volumes, volumeMounts, &b.desired.Loki.TLS, 
lokiCerts) - } - - statusTLS := helper.GetLokiStatusTLS(&b.desired.Loki) - if b.desired != nil && statusTLS.Enable && !statusTLS.InsecureSkipVerify { - volumes, volumeMounts = helper.AppendCertVolumes(volumes, volumeMounts, &statusTLS, lokiStatusCerts) - } - - if helper.LokiUseHostToken(&b.desired.Loki) { - volumes, volumeMounts = helper.AppendTokenVolume(volumes, volumeMounts, constants.PluginName, constants.PluginName) - } + args := b.buildArgs(b.desired) return &corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -289,10 +268,10 @@ func (b *builder) podTemplate(cmDigest string) *corev1.PodTemplateSpec { Image: b.imageName, ImagePullPolicy: corev1.PullPolicy(b.desired.ConsolePlugin.ImagePullPolicy), Resources: *b.desired.ConsolePlugin.Resources.DeepCopy(), - VolumeMounts: volumeMounts, + VolumeMounts: b.volumes.AppendMounts(volumeMounts), Args: args, }}, - Volumes: volumes, + Volumes: b.volumes.AppendVolumes(volumes), ServiceAccountName: constants.PluginName, }, } diff --git a/controllers/consoleplugin/consoleplugin_reconciler.go b/controllers/consoleplugin/consoleplugin_reconciler.go index cb377c9ee..57a24b685 100644 --- a/controllers/consoleplugin/consoleplugin_reconciler.go +++ b/controllers/consoleplugin/consoleplugin_reconciler.go @@ -4,7 +4,6 @@ import ( "context" "reflect" - "github.com/netobserv/network-observability-operator/pkg/discover" osv1alpha1 "github.com/openshift/api/console/v1alpha1" operatorsv1 "github.com/openshift/api/operator/v1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" @@ -26,11 +25,8 @@ type pluginSpec = flowslatest.FlowCollectorConsolePlugin // CPReconciler reconciles the current console plugin state with the desired configuration type CPReconciler struct { - reconcilers.ClientHelper - nobjMngr *reconcilers.NamespacedObjectManager - owned ownedObjects - image string - availableAPIs *discover.AvailableAPIs + *reconcilers.Instance + owned ownedObjects } type ownedObjects struct { @@ 
-43,7 +39,7 @@ type ownedObjects struct { serviceMonitor *monitoringv1.ServiceMonitor } -func NewReconciler(cl reconcilers.ClientHelper, ns, prevNS, imageName string, availableAPIs *discover.AvailableAPIs) CPReconciler { +func NewReconciler(common *reconcilers.Common, imageName string) CPReconciler { owned := ownedObjects{ deployment: &appsv1.Deployment{}, service: &corev1.Service{}, @@ -53,30 +49,30 @@ func NewReconciler(cl reconcilers.ClientHelper, ns, prevNS, imageName string, av configMap: &corev1.ConfigMap{}, serviceMonitor: &monitoringv1.ServiceMonitor{}, } - nobjMngr := reconcilers.NewNamespacedObjectManager(cl, ns, prevNS) - nobjMngr.AddManagedObject(constants.PluginName, owned.deployment) - nobjMngr.AddManagedObject(constants.PluginName, owned.service) - nobjMngr.AddManagedObject(metricsSvcName, owned.metricsService) - nobjMngr.AddManagedObject(constants.PluginName, owned.hpa) - nobjMngr.AddManagedObject(constants.PluginName, owned.serviceAccount) - nobjMngr.AddManagedObject(configMapName, owned.configMap) - if availableAPIs.HasSvcMonitor() { - nobjMngr.AddManagedObject(constants.PluginName, owned.serviceMonitor) + cmnInstance := common.NewInstance(imageName) + cmnInstance.Managed.AddManagedObject(constants.PluginName, owned.deployment) + cmnInstance.Managed.AddManagedObject(constants.PluginName, owned.service) + cmnInstance.Managed.AddManagedObject(metricsSvcName, owned.metricsService) + cmnInstance.Managed.AddManagedObject(constants.PluginName, owned.hpa) + cmnInstance.Managed.AddManagedObject(constants.PluginName, owned.serviceAccount) + cmnInstance.Managed.AddManagedObject(configMapName, owned.configMap) + if common.AvailableAPIs.HasSvcMonitor() { + cmnInstance.Managed.AddManagedObject(constants.PluginName, owned.serviceMonitor) } - return CPReconciler{ClientHelper: cl, nobjMngr: nobjMngr, owned: owned, image: imageName, availableAPIs: availableAPIs} + return CPReconciler{Instance: cmnInstance, owned: owned} } // CleanupNamespace cleans up old 
namespace func (r *CPReconciler) CleanupNamespace(ctx context.Context) { - r.nobjMngr.CleanupPreviousNamespace(ctx) + r.Managed.CleanupPreviousNamespace(ctx) } // Reconcile is the reconciler entry point to reconcile the current plugin state with the desired configuration func (r *CPReconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowCollector) error { - ns := r.nobjMngr.Namespace + ns := r.Managed.Namespace // Retrieve current owned objects - err := r.nobjMngr.FetchAll(ctx) + err := r.Managed.FetchAll(ctx) if err != nil { return err } @@ -86,30 +82,39 @@ func (r *CPReconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowC } // Create object builder - builder := newBuilder(ns, r.image, &desired.Spec) + builder := newBuilder(ns, r.Instance.Image, &desired.Spec) if err := r.reconcilePermissions(ctx, &builder); err != nil { return err } - if err = r.reconcilePlugin(ctx, builder, &desired.Spec); err != nil { + if err = r.reconcilePlugin(ctx, &builder, &desired.Spec); err != nil { return err } - cmDigest, err := r.reconcileConfigMap(ctx, builder, &desired.Spec) + cmDigest, err := r.reconcileConfigMap(ctx, &builder, &desired.Spec) if err != nil { return err } - if err = r.reconcileDeployment(ctx, builder, &desired.Spec, cmDigest); err != nil { + if err = r.reconcileDeployment(ctx, &builder, &desired.Spec, cmDigest); err != nil { return err } - if err = r.reconcileServices(ctx, builder, &desired.Spec); err != nil { + if err = r.reconcileServices(ctx, &builder, &desired.Spec); err != nil { return err } - if err = r.reconcileHPA(ctx, builder, &desired.Spec); err != nil { + if err = r.reconcileHPA(ctx, &builder, &desired.Spec); err != nil { + return err + } + + // Watch for Loki certificates if necessary; we'll ignore in that case the returned digest, as we don't need to restart pods on cert rotation + // because certificate is always reloaded from file + if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.TLS, r.Namespace); err 
!= nil { + return err + } + if _, _, err = r.Watcher.ProcessMTLSCerts(ctx, r.Client, &desired.Spec.Loki.StatusTLS, r.Namespace); err != nil { return err } @@ -138,7 +143,7 @@ func (r *CPReconciler) checkAutoPatch(ctx context.Context, desired *flowslatest. } func (r *CPReconciler) reconcilePermissions(ctx context.Context, builder *builder) error { - if !r.nobjMngr.Exists(r.owned.serviceAccount) { + if !r.Managed.Exists(r.owned.serviceAccount) { return r.CreateOwned(ctx, builder.serviceAccount()) } // update not needed for now @@ -154,7 +159,7 @@ func (r *CPReconciler) reconcilePermissions(ctx context.Context, builder *builde return nil } -func (r *CPReconciler) reconcilePlugin(ctx context.Context, builder builder, desired *flowslatest.FlowCollectorSpec) error { +func (r *CPReconciler) reconcilePlugin(ctx context.Context, builder *builder, desired *flowslatest.FlowCollectorSpec) error { // Console plugin is cluster-scope (it's not deployed in our namespace) however it must still be updated if our namespace changes oldPlg := osv1alpha1.ConsolePlugin{} pluginExists := true @@ -181,9 +186,9 @@ func (r *CPReconciler) reconcilePlugin(ctx context.Context, builder builder, des return nil } -func (r *CPReconciler) reconcileConfigMap(ctx context.Context, builder builder, desired *flowslatest.FlowCollectorSpec) (string, error) { +func (r *CPReconciler) reconcileConfigMap(ctx context.Context, builder *builder, desired *flowslatest.FlowCollectorSpec) (string, error) { newCM, configDigest := builder.configMap() - if !r.nobjMngr.Exists(r.owned.configMap) { + if !r.Managed.Exists(r.owned.configMap) { if err := r.CreateOwned(ctx, newCM); err != nil { return "", err } @@ -195,12 +200,12 @@ func (r *CPReconciler) reconcileConfigMap(ctx context.Context, builder builder, return configDigest, nil } -func (r *CPReconciler) reconcileDeployment(ctx context.Context, builder builder, desired *flowslatest.FlowCollectorSpec, cmDigest string) error { +func (r *CPReconciler) 
reconcileDeployment(ctx context.Context, builder *builder, desired *flowslatest.FlowCollectorSpec, cmDigest string) error { report := helper.NewChangeReport("Console deployment") defer report.LogIfNeeded(ctx) newDepl := builder.deployment(cmDigest) - if !r.nobjMngr.Exists(r.owned.deployment) { + if !r.Managed.Exists(r.owned.deployment) { if err := r.CreateOwned(ctx, newDepl); err != nil { return err } @@ -214,35 +219,35 @@ func (r *CPReconciler) reconcileDeployment(ctx context.Context, builder builder, return nil } -func (r *CPReconciler) reconcileServices(ctx context.Context, builder builder, desired *flowslatest.FlowCollectorSpec) error { +func (r *CPReconciler) reconcileServices(ctx context.Context, builder *builder, desired *flowslatest.FlowCollectorSpec) error { report := helper.NewChangeReport("Console services") defer report.LogIfNeeded(ctx) - if err := reconcilers.ReconcileService(ctx, r.nobjMngr, &r.ClientHelper, r.owned.service, builder.mainService(), &report); err != nil { + if err := r.ReconcileService(ctx, r.owned.service, builder.mainService(), &report); err != nil { return err } - if err := reconcilers.ReconcileService(ctx, r.nobjMngr, &r.ClientHelper, r.owned.metricsService, builder.metricsService(), &report); err != nil { + if err := r.ReconcileService(ctx, r.owned.metricsService, builder.metricsService(), &report); err != nil { return err } - if r.availableAPIs.HasSvcMonitor() { + if r.AvailableAPIs.HasSvcMonitor() { serviceMonitor := builder.serviceMonitor() - if err := reconcilers.GenericReconcile(ctx, r.nobjMngr, &r.ClientHelper, r.owned.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { + if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.owned.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { return err } } return nil } -func (r *CPReconciler) reconcileHPA(ctx context.Context, builder builder, desired *flowslatest.FlowCollectorSpec) error { +func (r 
*CPReconciler) reconcileHPA(ctx context.Context, builder *builder, desired *flowslatest.FlowCollectorSpec) error { report := helper.NewChangeReport("Console autoscaler") defer report.LogIfNeeded(ctx) // Delete or Create / Update Autoscaler according to HPA option if helper.HPADisabled(&desired.ConsolePlugin.Autoscaler) { - r.nobjMngr.TryDelete(ctx, r.owned.hpa) + r.Managed.TryDelete(ctx, r.owned.hpa) } else { newASC := builder.autoScaler() - if !r.nobjMngr.Exists(r.owned.hpa) { + if !r.Managed.Exists(r.owned.hpa) { if err := r.CreateOwned(ctx, newASC); err != nil { return err } diff --git a/controllers/constants/constants.go b/controllers/constants/constants.go index 5f8a78ef9..98a2f79b5 100644 --- a/controllers/constants/constants.go +++ b/controllers/constants/constants.go @@ -20,12 +20,13 @@ const ( // PodConfigurationDigest is an annotation name to facilitate pod restart after // any external configuration change - AnnotationDomain = "flows.netobserv.io" - PodConfigurationDigest = AnnotationDomain + "/config-digest" - PodCertIDSuffix = AnnotationDomain + "/cert-" - ConversionAnnotation = AnnotationDomain + "/conversion-data" - CertCASuffix = "ca" - CertUserSuffix = "user" + AnnotationDomain = "flows.netobserv.io" + PodConfigurationDigest = AnnotationDomain + "/config-digest" + PodWatchedSuffix = AnnotationDomain + "/watched-" + ConversionAnnotation = AnnotationDomain + "/conversion-data" + NamespaceCopyAnnotation = AnnotationDomain + "/copied-from" + + TokensPath = "/var/run/secrets/tokens/" FlowLogType = "flowLog" NewConnectionType = "newConnection" @@ -36,6 +37,3 @@ const ( var LokiIndexFields = []string{"SrcK8S_Namespace", "SrcK8S_OwnerName", "DstK8S_Namespace", "DstK8S_OwnerName", "FlowDirection"} var LokiConnectionIndexFields = []string{"_RecordType"} var FlowCollectorName = types.NamespacedName{Name: "cluster"} - -func CertCAName(prefix string) string { return prefix + "-" + CertCASuffix } -func CertUserName(prefix string) string { return prefix + "-" + 
CertUserSuffix } diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go index 9c233d1ea..887c1efab 100644 --- a/controllers/ebpf/agent_controller.go +++ b/controllers/ebpf/agent_controller.go @@ -6,9 +6,6 @@ import ( "strconv" "strings" - "github.com/netobserv/network-observability-operator/controllers/ebpf/internal/permissions" - "github.com/netobserv/network-observability-operator/controllers/operator" - "github.com/netobserv/network-observability-operator/pkg/discover" v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -19,8 +16,12 @@ import ( flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" "github.com/netobserv/network-observability-operator/controllers/constants" + "github.com/netobserv/network-observability-operator/controllers/ebpf/internal/permissions" + "github.com/netobserv/network-observability-operator/controllers/operator" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" + "github.com/netobserv/network-observability-operator/pkg/volumes" + "github.com/netobserv/network-observability-operator/pkg/watchers" "k8s.io/apimachinery/pkg/api/equality" ) @@ -56,7 +57,6 @@ const ( exportGRPC = "grpc" ) -const kafkaCerts = "kafka-certs" const averageMessageSize = 100 type reconcileAction int @@ -71,30 +71,17 @@ const ( // associated objects that are required to bind the proper permissions: namespace, service // accounts, SecurityContextConstraints... 
type AgentController struct { - client reconcilers.ClientHelper - baseNamespace string - privilegedNamespace string - previousPrivilegedNamespace string - permissions permissions.Reconciler - config *operator.Config + reconcilers.Common + permissions permissions.Reconciler + config *operator.Config + volumes volumes.Builder } -func NewAgentController( - client reconcilers.ClientHelper, - baseNamespace string, - previousBaseNamespace string, - permissionsVendor *discover.Permissions, - config *operator.Config, -) *AgentController { - pns := baseNamespace + constants.EBPFPrivilegedNSSuffix - opns := previousBaseNamespace + constants.EBPFPrivilegedNSSuffix +func NewAgentController(common *reconcilers.Common, config *operator.Config) *AgentController { return &AgentController{ - client: client, - baseNamespace: baseNamespace, - privilegedNamespace: pns, - previousPrivilegedNamespace: opns, - permissions: permissions.NewReconciler(client, pns, opns, permissionsVendor), - config: config, + Common: *common, + permissions: permissions.NewReconciler(common), + config: config, } } @@ -106,7 +93,7 @@ func (c *AgentController) Reconcile( if err != nil { return fmt.Errorf("fetching current EBPF Agent: %w", err) } - if !helper.UseEBPF(&target.Spec) || c.previousPrivilegedNamespace != c.privilegedNamespace { + if !helper.UseEBPF(&target.Spec) || c.PreviousPrivilegedNamespace() != c.PrivilegedNamespace() { if current == nil { rlog.Info("nothing to do, as the requested agent is not eBPF", "currentAgent", target.Spec.Agent) @@ -116,7 +103,7 @@ func (c *AgentController) Reconcile( // undeploy the agent rlog.Info("user changed the agent type, or the target namespace. 
Deleting eBPF agent", "currentAgent", target.Spec.Agent) - if err := c.client.Delete(ctx, current); err != nil { + if err := c.Delete(ctx, current); err != nil { if errors.IsNotFound(err) { return nil } @@ -129,54 +116,55 @@ func (c *AgentController) Reconcile( if err := c.permissions.Reconcile(ctx, &target.Spec.Agent.EBPF); err != nil { return fmt.Errorf("reconciling permissions: %w", err) } - desired := c.desired(target) + desired, err := c.desired(ctx, target) + if err != nil { + return err + } switch c.requiredAction(current, desired) { case actionCreate: rlog.Info("action: create agent") - return c.client.CreateOwned(ctx, desired) + return c.CreateOwned(ctx, desired) case actionUpdate: rlog.Info("action: update agent") - return c.client.UpdateOwned(ctx, current, desired) + return c.UpdateOwned(ctx, current, desired) default: rlog.Info("action: nothing to do") - c.client.CheckDaemonSetInProgress(current) + c.CheckDaemonSetInProgress(current) return nil } } func (c *AgentController) current(ctx context.Context) (*v1.DaemonSet, error) { agentDS := v1.DaemonSet{} - if err := c.client.Get(ctx, types.NamespacedName{ + if err := c.Get(ctx, types.NamespacedName{ Name: constants.EBPFAgentName, - Namespace: c.previousPrivilegedNamespace, + Namespace: c.PreviousPrivilegedNamespace(), }, &agentDS); err != nil { if errors.IsNotFound(err) { return nil, nil } return nil, fmt.Errorf("can't read DaemonSet %s/%s: %w", - c.previousPrivilegedNamespace, constants.EBPFAgentName, err) + c.PreviousPrivilegedNamespace(), constants.EBPFAgentName, err) } return &agentDS, nil } -func (c *AgentController) desired(coll *flowslatest.FlowCollector) *v1.DaemonSet { +func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCollector) (*v1.DaemonSet, error) { if coll == nil || !helper.UseEBPF(&coll.Spec) { - return nil + return nil, nil } version := helper.ExtractVersion(c.config.EBPFAgentImage) - volumeMounts := []corev1.VolumeMount{} - volumes := []corev1.Volume{} - if 
helper.UseKafka(&coll.Spec) && coll.Spec.Kafka.TLS.Enable { - // NOTE: secrets need to be copied from the base netobserv namespace to the privileged one. - // This operation must currently be performed manually (run "make fix-ebpf-kafka-tls"). It could be automated here. - volumes, volumeMounts = helper.AppendCertVolumes(volumes, volumeMounts, &coll.Spec.Kafka.TLS, kafkaCerts) + annotations := make(map[string]string) + env, err := c.envConfig(ctx, coll, annotations) + if err != nil { + return nil, err } return &v1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: constants.EBPFAgentName, - Namespace: c.privilegedNamespace, + Namespace: c.PrivilegedNamespace(), Labels: map[string]string{ "app": constants.EBPFAgentName, "version": helper.MaxLabelLength(version), @@ -188,7 +176,8 @@ func (c *AgentController) desired(coll *flowslatest.FlowCollector) *v1.DaemonSet }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": constants.EBPFAgentName}, + Labels: map[string]string{"app": constants.EBPFAgentName}, + Annotations: annotations, }, Spec: corev1.PodSpec{ // Allows deploying an instance in the master node @@ -196,23 +185,23 @@ func (c *AgentController) desired(coll *flowslatest.FlowCollector) *v1.DaemonSet ServiceAccountName: constants.EBPFServiceAccount, HostNetwork: true, DNSPolicy: corev1.DNSClusterFirstWithHostNet, - Volumes: volumes, + Volumes: c.volumes.GetVolumes(), Containers: []corev1.Container{{ Name: constants.EBPFAgentName, Image: c.config.EBPFAgentImage, ImagePullPolicy: corev1.PullPolicy(coll.Spec.Agent.EBPF.ImagePullPolicy), Resources: coll.Spec.Agent.EBPF.Resources, SecurityContext: c.securityContext(coll), - Env: c.envConfig(coll), - VolumeMounts: volumeMounts, + Env: env, + VolumeMounts: c.volumes.GetMounts(), }}, }, }, }, - } + }, nil } -func (c *AgentController) envConfig(coll *flowslatest.FlowCollector) []corev1.EnvVar { +func (c *AgentController) envConfig(ctx context.Context, coll 
*flowslatest.FlowCollector, annots map[string]string) ([]corev1.EnvVar, error) { var config []corev1.EnvVar if coll.Spec.Agent.EBPF.CacheActiveTimeout != "" { config = append(config, corev1.EnvVar{ @@ -278,12 +267,22 @@ func (c *AgentController) envConfig(coll *flowslatest.FlowCollector) []corev1.En corev1.EnvVar{Name: envKafkaBatchMessages, Value: strconv.Itoa(coll.Spec.Agent.EBPF.KafkaBatchSize / averageMessageSize)}, ) if coll.Spec.Kafka.TLS.Enable { + // Annotate pod with certificate reference so that it is reloaded if modified + // If user cert is provided, it will use mTLS. Else, simple TLS (the userDigest and paths will be empty) + caDigest, userDigest, err := c.Watcher.ProcessMTLSCerts(ctx, c.Client, &coll.Spec.Kafka.TLS, c.PrivilegedNamespace()) + if err != nil { + return nil, err + } + annots[watchers.Annotation("kafka-ca")] = caDigest + annots[watchers.Annotation("kafka-user")] = userDigest + + caPath, userCertPath, userKeyPath := c.volumes.AddMutualTLSCertificates(&coll.Spec.Kafka.TLS, "kafka-certs") config = append(config, corev1.EnvVar{Name: envKafkaEnableTLS, Value: "true"}, corev1.EnvVar{Name: envKafkaTLSInsecureSkipVerify, Value: strconv.FormatBool(coll.Spec.Kafka.TLS.InsecureSkipVerify)}, - corev1.EnvVar{Name: envKafkaTLSCACertPath, Value: helper.GetCACertPath(&coll.Spec.Kafka.TLS, kafkaCerts)}, - corev1.EnvVar{Name: envKafkaTLSUserCertPath, Value: helper.GetUserCertPath(&coll.Spec.Kafka.TLS, kafkaCerts)}, - corev1.EnvVar{Name: envKafkaTLSUserKeyPath, Value: helper.GetUserKeyPath(&coll.Spec.Kafka.TLS, kafkaCerts)}, + corev1.EnvVar{Name: envKafkaTLSCACertPath, Value: caPath}, + corev1.EnvVar{Name: envKafkaTLSUserCertPath, Value: userCertPath}, + corev1.EnvVar{Name: envKafkaTLSUserKeyPath, Value: userKeyPath}, ) } } else { @@ -303,7 +302,7 @@ func (c *AgentController) envConfig(coll *flowslatest.FlowCollector) []corev1.En Value: strconv.Itoa(int(coll.Spec.Processor.Port)), }) } - return config + return config, nil } func (c *AgentController) 
requiredAction(current, desired *v1.DaemonSet) reconcileAction { diff --git a/controllers/ebpf/internal/permissions/permissions.go b/controllers/ebpf/internal/permissions/permissions.go index d4de3e38a..acfa8c721 100644 --- a/controllers/ebpf/internal/permissions/permissions.go +++ b/controllers/ebpf/internal/permissions/permissions.go @@ -8,7 +8,6 @@ import ( flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" - "github.com/netobserv/network-observability-operator/pkg/discover" "github.com/netobserv/network-observability-operator/pkg/helper" osv1 "github.com/openshift/api/security/v1" v1 "k8s.io/api/core/v1" @@ -27,24 +26,11 @@ var AllowedCapabilities = []v1.Capability{"BPF", "PERFMON", "NET_ADMIN", "SYS_RE // - Create netobserv-ebpf-agent service account in the privileged namespace // - For Openshift, apply the required SecurityContextConstraints for privileged Pod operation type Reconciler struct { - client reconcilers.ClientHelper - privilegedNamespace string - previousPrivilegedNamespace string - vendor *discover.Permissions + reconcilers.Common } -func NewReconciler( - client reconcilers.ClientHelper, - privilegedNamespace string, - previousPrivilegedNamespace string, - permissionsVendor *discover.Permissions, -) Reconciler { - return Reconciler{ - client: client, - privilegedNamespace: privilegedNamespace, - previousPrivilegedNamespace: previousPrivilegedNamespace, - vendor: permissionsVendor, - } +func NewReconciler(cmn *reconcilers.Common) Reconciler { + return Reconciler{Common: *cmn} } func (c *Reconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowCollectorEBPF) error { @@ -63,14 +49,15 @@ func (c *Reconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowCol } func (c *Reconciler) reconcileNamespace(ctx context.Context) error { - if 
c.previousPrivilegedNamespace != c.privilegedNamespace { + ns := c.PrivilegedNamespace() + if ns != c.PreviousPrivilegedNamespace() { if err := c.cleanupPreviousNamespace(ctx); err != nil { return err } } - rlog := log.FromContext(ctx, "PrivilegedNamespace", c.privilegedNamespace) + rlog := log.FromContext(ctx, "PrivilegedNamespace", ns) actual := &v1.Namespace{} - if err := c.client.Get(ctx, client.ObjectKey{Name: c.privilegedNamespace}, actual); err != nil { + if err := c.Get(ctx, client.ObjectKey{Name: ns}, actual); err != nil { if errors.IsNotFound(err) { actual = nil } else { @@ -79,7 +66,7 @@ func (c *Reconciler) reconcileNamespace(ctx context.Context) error { } desired := &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: c.privilegedNamespace, + Name: ns, Labels: map[string]string{ "app": constants.OperatorName, "pod-security.kubernetes.io/enforce": "privileged", @@ -89,7 +76,7 @@ func (c *Reconciler) reconcileNamespace(ctx context.Context) error { } if actual == nil && desired != nil { rlog.Info("creating namespace") - return c.client.CreateOwned(ctx, desired) + return c.CreateOwned(ctx, desired) } if actual != nil && desired != nil { // We noticed that audit labels are automatically removed @@ -101,7 +88,7 @@ func (c *Reconciler) reconcileNamespace(ctx context.Context) error { "pod-security.kubernetes.io/enforce": "privileged", }) { rlog.Info("updating namespace") - return c.client.UpdateOwned(ctx, actual, desired) + return c.UpdateOwned(ctx, actual, desired) } } rlog.Info("namespace is already reconciled. 
Doing nothing") @@ -114,11 +101,11 @@ func (c *Reconciler) reconcileServiceAccount(ctx context.Context) error { sAcc := &v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: constants.EBPFServiceAccount, - Namespace: c.privilegedNamespace, + Namespace: c.PrivilegedNamespace(), }, } actual := &v1.ServiceAccount{} - if err := c.client.Get(ctx, client.ObjectKeyFromObject(sAcc), actual); err != nil { + if err := c.Get(ctx, client.ObjectKeyFromObject(sAcc), actual); err != nil { if errors.IsNotFound(err) { actual = nil } else { @@ -127,7 +114,7 @@ func (c *Reconciler) reconcileServiceAccount(ctx context.Context) error { } if actual == nil { rlog.Info("creating service account") - return c.client.CreateOwned(ctx, sAcc) + return c.CreateOwned(ctx, sAcc) } rlog.Info("service account already reconciled. Doing nothing") return nil @@ -136,7 +123,7 @@ func (c *Reconciler) reconcileServiceAccount(ctx context.Context) error { func (c *Reconciler) reconcileVendorPermissions( ctx context.Context, desired *flowslatest.FlowCollectorEBPF, ) error { - if c.vendor.Vendor(ctx) == discover.VendorOpenShift { + if c.UseOpenShiftSCC { return c.reconcileOpenshiftPermissions(ctx, desired) } return nil @@ -159,7 +146,7 @@ func (c *Reconciler) reconcileOpenshiftPermissions( Type: osv1.SELinuxStrategyRunAsAny, }, Users: []string{ - "system:serviceaccount:" + c.privilegedNamespace + ":" + constants.EBPFServiceAccount, + "system:serviceaccount:" + c.PrivilegedNamespace() + ":" + constants.EBPFServiceAccount, }, } if desired.Privileged { @@ -168,7 +155,7 @@ func (c *Reconciler) reconcileOpenshiftPermissions( scc.AllowedCapabilities = AllowedCapabilities } actual := &osv1.SecurityContextConstraints{} - if err := c.client.Get(ctx, client.ObjectKeyFromObject(scc), actual); err != nil { + if err := c.Get(ctx, client.ObjectKeyFromObject(scc), actual); err != nil { if errors.IsNotFound(err) { actual = nil } else { @@ -177,7 +164,7 @@ func (c *Reconciler) reconcileOpenshiftPermissions( } if actual == 
nil { rlog.Info("creating SecurityContextConstraints") - return c.client.CreateOwned(ctx, scc) + return c.CreateOwned(ctx, scc) } if scc.AllowHostNetwork != actual.AllowHostNetwork || !equality.Semantic.DeepDerivative(&scc.RunAsUser, &actual.RunAsUser) || @@ -187,20 +174,20 @@ func (c *Reconciler) reconcileOpenshiftPermissions( !equality.Semantic.DeepDerivative(&scc.AllowedCapabilities, &actual.AllowedCapabilities) { rlog.Info("updating SecurityContextConstraints") - return c.client.UpdateOwned(ctx, actual, scc) + return c.UpdateOwned(ctx, actual, scc) } rlog.Info("SecurityContextConstraints already reconciled. Doing nothing") return nil } func (c *Reconciler) cleanupPreviousNamespace(ctx context.Context) error { - rlog := log.FromContext(ctx, "PreviousPrivilegedNamespace", c.previousPrivilegedNamespace) + rlog := log.FromContext(ctx, "PreviousPrivilegedNamespace", c.PreviousPrivilegedNamespace()) // Delete service account - if err := c.client.Delete(ctx, &v1.ServiceAccount{ + if err := c.Delete(ctx, &v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: constants.EBPFServiceAccount, - Namespace: c.previousPrivilegedNamespace, + Namespace: c.PreviousPrivilegedNamespace(), }, }); err != nil { if errors.IsNotFound(err) { @@ -211,7 +198,7 @@ func (c *Reconciler) cleanupPreviousNamespace(ctx context.Context) error { // Do not delete SCC as it's not namespace-scoped (it will be reconciled "as usual") previous := &v1.Namespace{} - if err := c.client.Get(ctx, client.ObjectKey{Name: c.previousPrivilegedNamespace}, previous); err != nil { + if err := c.Get(ctx, client.ObjectKey{Name: c.PreviousPrivilegedNamespace()}, previous); err != nil { if errors.IsNotFound(err) { // Not found => return without error rlog.Info("Previous privileged namespace not found, skipping cleanup") @@ -222,7 +209,7 @@ func (c *Reconciler) cleanupPreviousNamespace(ctx context.Context) error { // Make sure we own that namespace if len(previous.OwnerReferences) > 0 && 
strings.HasPrefix(previous.OwnerReferences[0].APIVersion, flowslatest.GroupVersion.Group) { rlog.Info("Owning previous privileged namespace: deleting it") - if err := c.client.Delete(ctx, previous); err != nil { + if err := c.Delete(ctx, previous); err != nil { if errors.IsNotFound(err) { return nil } diff --git a/controllers/flowcollector_controller.go b/controllers/flowcollector_controller.go index fe5b0d7d3..e75262105 100644 --- a/controllers/flowcollector_controller.go +++ b/controllers/flowcollector_controller.go @@ -5,7 +5,6 @@ import ( "fmt" "net" - "github.com/netobserv/network-observability-operator/controllers/operator" osv1alpha1 "github.com/openshift/api/console/v1alpha1" securityv1 "github.com/openshift/api/security/v1" appsv1 "k8s.io/api/apps/v1" @@ -28,6 +27,7 @@ import ( "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/ebpf" "github.com/netobserv/network-observability-operator/controllers/flowlogspipeline" + "github.com/netobserv/network-observability-operator/controllers/operator" "github.com/netobserv/network-observability-operator/controllers/ovs" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/conditions" @@ -48,7 +48,7 @@ type FlowCollectorReconciler struct { availableAPIs *discover.AvailableAPIs Scheme *runtime.Scheme config *operator.Config - certWatcher *watchers.CertificatesWatcher + watcher *watchers.Watcher lookupIP func(string) ([]net.IP, error) } @@ -62,8 +62,8 @@ func NewFlowCollectorReconciler(client client.Client, scheme *runtime.Scheme, co } //+kubebuilder:rbac:groups=apps,resources=deployments;daemonsets,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=core,resources=namespaces;services;serviceaccounts;configmaps,verbs=get;list;watch;create;update;patch;delete 
-//+kubebuilder:rbac:groups=core,resources=secrets;endpoints,verbs=get;list;watch +//+kubebuilder:rbac:groups=core,resources=namespaces;services;serviceaccounts;configmaps;secrets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=endpoints,verbs=get;list;watch //+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles;roles,verbs=get;create;delete;watch;list //+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings;rolebindings,verbs=get;list;create;delete;update;watch //+kubebuilder:rbac:groups=console.openshift.io,resources=consoleplugins,verbs=get;create;delete;update;patch;list;watch @@ -97,21 +97,22 @@ func (r *FlowCollectorReconciler) Reconcile(ctx context.Context, req ctrl.Reques } ns := getNamespaceName(desired) - r.certWatcher.Reset(ns) + r.watcher.Reset(ns) - clientHelper := r.newClientHelper(desired) + var didChange, isInProgress bool previousNamespace := desired.Status.Namespace + reconcilersInfo := r.newCommonInfo(ctx, desired, ns, previousNamespace, func(b bool) { didChange = b }, func(b bool) { isInProgress = b }) - err = r.reconcileOperator(ctx, clientHelper, ns, desired) + err = r.reconcileOperator(ctx, &reconcilersInfo, desired) if err != nil { return ctrl.Result{}, err } // Create reconcilers - flpReconciler := flowlogspipeline.NewReconciler(ctx, clientHelper, ns, previousNamespace, r.config.FlowlogsPipelineImage, &r.permissions, r.availableAPIs) + flpReconciler := flowlogspipeline.NewReconciler(&reconcilersInfo, r.config.FlowlogsPipelineImage) var cpReconciler consoleplugin.CPReconciler if r.availableAPIs.HasConsolePlugin() { - cpReconciler = consoleplugin.NewReconciler(clientHelper, ns, previousNamespace, r.config.ConsolePluginImage, r.availableAPIs) + cpReconciler = consoleplugin.NewReconciler(&reconcilersInfo, r.config.ConsolePluginImage) } // Check namespace changed @@ -128,26 +129,19 @@ func (r *FlowCollectorReconciler) Reconcile(ctx context.Context, req 
ctrl.Reques // OVS config map for CNO if r.availableAPIs.HasCNO() { - ovsConfigController := ovs.NewFlowsConfigCNOController(clientHelper, - ns, - desired.Spec.Agent.IPFIX.ClusterNetworkOperator.Namespace, - ovsFlowsConfigMapName, - r.lookupIP) + ovsConfigController := ovs.NewFlowsConfigCNOController(&reconcilersInfo, desired.Spec.Agent.IPFIX.ClusterNetworkOperator.Namespace, ovsFlowsConfigMapName) if err := ovsConfigController.Reconcile(ctx, desired); err != nil { return ctrl.Result{}, r.failure(ctx, conditions.ReconcileCNOFailed(err), desired) } } else { - ovsConfigController := ovs.NewFlowsConfigOVNKController(clientHelper, - ns, - desired.Spec.Agent.IPFIX.OVNKubernetes, - r.lookupIP) + ovsConfigController := ovs.NewFlowsConfigOVNKController(&reconcilersInfo, desired.Spec.Agent.IPFIX.OVNKubernetes) if err := ovsConfigController.Reconcile(ctx, desired); err != nil { return ctrl.Result{}, r.failure(ctx, conditions.ReconcileOVNKFailed(err), desired) } } // eBPF agent - ebpfAgentController := ebpf.NewAgentController(clientHelper, ns, previousNamespace, &r.permissions, r.config) + ebpfAgentController := ebpf.NewAgentController(&reconcilersInfo, r.config) if err := ebpfAgentController.Reconcile(ctx, desired); err != nil { return ctrl.Result{}, r.failure(ctx, conditions.ReconcileAgentFailed(err), desired) } @@ -162,9 +156,9 @@ func (r *FlowCollectorReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Set readiness status var status *metav1.Condition - if clientHelper.DidChange() { + if didChange { status = conditions.Updating() - } else if clientHelper.IsInProgress() { + } else if isInProgress { status = conditions.DeploymentInProgress() } else { status = conditions.Ready() @@ -234,7 +228,7 @@ func (r *FlowCollectorReconciler) SetupWithManager(ctx context.Context, mgr ctrl return err } - r.certWatcher = watchers.RegisterCertificatesWatcher(builder) + r.watcher = watchers.RegisterWatcher(builder) return builder.Complete(r) } @@ -287,13 +281,13 @@ func (r 
*FlowCollectorReconciler) namespaceExist(ctx context.Context, nsName str return ns, nil } -func (r *FlowCollectorReconciler) reconcileOperator(ctx context.Context, clientHelper reconcilers.ClientHelper, ns string, desired *flowslatest.FlowCollector) error { +func (r *FlowCollectorReconciler) reconcileOperator(ctx context.Context, cmn *reconcilers.Common, desired *flowslatest.FlowCollector) error { // If namespace does not exist, we create it - nsExist, err := r.namespaceExist(ctx, ns) + nsExist, err := r.namespaceExist(ctx, cmn.Namespace) if err != nil { return err } - desiredNs := buildNamespace(ns, r.config.DownstreamDeployment) + desiredNs := buildNamespace(cmn.Namespace, r.config.DownstreamDeployment) if nsExist == nil { err = r.Create(ctx, desiredNs) if err != nil { @@ -306,18 +300,18 @@ func (r *FlowCollectorReconciler) reconcileOperator(ctx context.Context, clientH } } if r.config.DownstreamDeployment { - desiredRole := buildRoleMonitoringReader(ns) - if err := clientHelper.ReconcileClusterRole(ctx, desiredRole); err != nil { + desiredRole := buildRoleMonitoringReader(cmn.Namespace) + if err := cmn.ReconcileClusterRole(ctx, desiredRole); err != nil { return err } - desiredBinding := buildRoleBindingMonitoringReader(ns) - if err := clientHelper.ReconcileClusterRoleBinding(ctx, desiredBinding); err != nil { + desiredBinding := buildRoleBindingMonitoringReader(cmn.Namespace) + if err := cmn.ReconcileClusterRoleBinding(ctx, desiredBinding); err != nil { return err } } if r.availableAPIs.HasSvcMonitor() { desiredHealthDashboardCM := buildHealthDashboard() - if err := clientHelper.ReconcileConfigMap(ctx, desiredHealthDashboardCM); err != nil { + if err := cmn.ReconcileConfigMap(ctx, desiredHealthDashboardCM); err != nil { return err } } @@ -354,11 +348,8 @@ func (r *FlowCollectorReconciler) checkFinalizer(ctx context.Context, desired *f func (r *FlowCollectorReconciler) finalize(ctx context.Context, desired *flowslatest.FlowCollector) error { if 
!r.availableAPIs.HasCNO() { ns := getNamespaceName(desired) - clientHelper := r.newClientHelper(desired) - ovsConfigController := ovs.NewFlowsConfigOVNKController(clientHelper, - ns, - desired.Spec.Agent.IPFIX.OVNKubernetes, - r.lookupIP) + info := r.newCommonInfo(ctx, desired, ns, ns, func(b bool) {}, func(b bool) {}) + ovsConfigController := ovs.NewFlowsConfigOVNKController(&info, desired.Spec.Agent.IPFIX.OVNKubernetes) if err := ovsConfigController.Finalize(ctx, desired); err != nil { return fmt.Errorf("failed to finalize ovn-kubernetes reconciler: %w", err) } @@ -366,12 +357,21 @@ func (r *FlowCollectorReconciler) finalize(ctx context.Context, desired *flowsla return nil } -func (r *FlowCollectorReconciler) newClientHelper(desired *flowslatest.FlowCollector) reconcilers.ClientHelper { - return reconcilers.ClientHelper{ - Client: r.Client, - SetControllerReference: func(obj client.Object) error { - return ctrl.SetControllerReference(desired, obj, r.Scheme) +func (r *FlowCollectorReconciler) newCommonInfo(ctx context.Context, desired *flowslatest.FlowCollector, ns, prevNs string, changeHook, inProgressHook func(bool)) reconcilers.Common { + return reconcilers.Common{ + Client: helper.Client{ + Client: r.Client, + SetControllerReference: func(obj client.Object) error { + return ctrl.SetControllerReference(desired, obj, r.Scheme) + }, + SetChanged: changeHook, + SetInProgress: inProgressHook, }, + Namespace: ns, + PreviousNamespace: prevNs, + UseOpenShiftSCC: r.permissions.Vendor(ctx) == discover.VendorOpenShift, + AvailableAPIs: r.availableAPIs, + Watcher: r.watcher, } } diff --git a/controllers/flowcollector_controller_certificates_test.go b/controllers/flowcollector_controller_certificates_test.go new file mode 100644 index 000000000..e74f5e2b2 --- /dev/null +++ b/controllers/flowcollector_controller_certificates_test.go @@ -0,0 +1,454 @@ +package controllers + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + "github.com/netobserv/network-observability-operator/controllers/constants" + . "github.com/netobserv/network-observability-operator/controllers/controllerstest" + "github.com/netobserv/network-observability-operator/controllers/flowlogspipeline" + "github.com/netobserv/network-observability-operator/pkg/watchers" +) + +var cmw watchers.ConfigWatchable +var sw watchers.SecretWatchable + +// nolint:cyclop +func flowCollectorCertificatesSpecs() { + const operatorNamespace = "main-namespace" + crKey := types.NamespacedName{ + Name: "cluster", + } + flpKey := types.NamespacedName{ + Name: constants.FLPName + flowlogspipeline.FlpConfSuffix[flowlogspipeline.ConfKafkaTransformer], + Namespace: operatorNamespace, + } + pluginKey := types.NamespacedName{ + Name: constants.PluginName, + Namespace: operatorNamespace, + } + agentKey := types.NamespacedName{ + Name: constants.EBPFAgentName, + Namespace: operatorNamespace + "-privileged", + } + lokiCert := v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "loki-ca", + Namespace: "loki-namespace", + }, + Data: map[string]string{ + "cert.crt": "--- LOKI CA CERT ---", + "other": "any", + }, + } + expectedLokiHash, _ := cmw.GetDigest(&lokiCert, []string{"cert.crt"}) + kafkaCert := v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kafka-ca", + Namespace: operatorNamespace, + }, + Data: map[string]string{ + "cert.crt": "--- KAFKA CA CERT ---", + "other": "any", + }, + } + expectedKafkaHash, _ := cmw.GetDigest(&kafkaCert, []string{"cert.crt"}) + kafkaUserCert := v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kafka-user", + Namespace: operatorNamespace, + }, + Data: map[string][]byte{ + "user.crt": []byte("--- KAFKA USER CERT ---"), + 
"user.key": []byte("--- KAFKA USER KEY ---"), + "other": []byte("any"), + }, + } + expectedKafkaUserHash, _ := sw.GetDigest(&kafkaUserCert, []string{"user.crt", "user.key"}) + kafka2Cert := v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kafka-exporter-ca", + Namespace: "kafka-exporter-namespace", + }, + Data: map[string]string{ + "cert.crt": "--- KAFKA 2 CA CERT ---", + "other": "any", + }, + } + expectedKafka2Hash, _ := cmw.GetDigest(&kafka2Cert, []string{"cert.crt"}) + + BeforeEach(func() { + // Add any setup steps that needs to be executed before each test + }) + + AfterEach(func() { + // Add any teardown steps that needs to be executed after each test + }) + + agent := appsv1.DaemonSet{} + flp := appsv1.Deployment{} + plugin := appsv1.Deployment{} + var lastAgentAnnots map[string]string + var lastFLPAnnots map[string]string + var lastPluginAnnots map[string]string + + Context("Verify expectations are sane", func() { + It("Expected hashes should all be different", func() { + cmEmpty, _ := cmw.GetDigest(&v1.ConfigMap{}, []string{"any"}) + sEmpty, _ := sw.GetDigest(&v1.Secret{}, []string{"any"}) + allKeys := map[string]interface{}{} + for _, hash := range []string{"", cmEmpty, sEmpty, expectedLokiHash, expectedKafkaHash, expectedKafka2Hash, expectedKafkaUserHash} { + allKeys[hash] = nil + } + Expect(allKeys).To(HaveLen(7)) + }) + }) + + Context("Deploying with Loki and Kafka certificates", func() { + It("Should create all certs successfully", func() { + By("Creating Loki certificate") + Eventually(func() interface{} { return k8sClient.Create(ctx, &lokiCert) }, timeout, interval).Should(Succeed()) + By("Creating Kafka CA certificate") + Eventually(func() interface{} { return k8sClient.Create(ctx, &kafkaCert) }, timeout, interval).Should(Succeed()) + By("Creating Kafka User certificate") + Eventually(func() interface{} { return k8sClient.Create(ctx, &kafkaUserCert) }, timeout, interval).Should(Succeed()) + By("Creating Kafka-export CA certificate") + 
Eventually(func() interface{} { return k8sClient.Create(ctx, &kafka2Cert) }, timeout, interval).Should(Succeed()) + }) + + flowSpec := flowslatest.FlowCollectorSpec{ + Namespace: operatorNamespace, + DeploymentModel: flowslatest.DeploymentModelKafka, + Agent: flowslatest.FlowCollectorAgent{ + Type: "EBPF", + }, + Loki: flowslatest.FlowCollectorLoki{ + AuthToken: flowslatest.LokiAuthForwardUserToken, + TLS: flowslatest.ClientTLS{ + Enable: true, + CACert: flowslatest.CertificateReference{ + Type: flowslatest.RefTypeConfigMap, + Name: lokiCert.Name, + Namespace: lokiCert.Namespace, + CertFile: "cert.crt", + }, + }, + }, + Kafka: flowslatest.FlowCollectorKafka{ + TLS: flowslatest.ClientTLS{ + Enable: true, + CACert: flowslatest.CertificateReference{ + Type: flowslatest.RefTypeConfigMap, + Name: kafkaCert.Name, + CertFile: "cert.crt", + // No namespace means operator's namespace + }, + UserCert: flowslatest.CertificateReference{ + Type: flowslatest.RefTypeSecret, + Name: kafkaUserCert.Name, + CertFile: "user.crt", + CertKey: "user.key", + // No namespace means operator's namespace + }, + }, + }, + Exporters: []*flowslatest.FlowCollectorExporter{{ + Type: flowslatest.KafkaExporter, + Kafka: flowslatest.FlowCollectorKafka{ + TLS: flowslatest.ClientTLS{ + Enable: true, + CACert: flowslatest.CertificateReference{ + Type: flowslatest.RefTypeConfigMap, + Name: kafka2Cert.Name, + Namespace: kafka2Cert.Namespace, + CertFile: "cert.crt", + }, + }, + }, + }}, + } + + It("Should create CR successfully", func() { + Eventually(func() interface{} { + return k8sClient.Create(ctx, &flowslatest.FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: crKey.Name, + }, + Spec: flowSpec, + }) + }, timeout, interval).Should(Succeed()) + }) + + It("Should copy certificates when necessary", func() { + By("Expecting Loki CA cert copied to operator namespace") + Eventually(func() interface{} { + var cm v1.ConfigMap + if err := k8sClient.Get(ctx, types.NamespacedName{Name: lokiCert.Name, 
Namespace: operatorNamespace}, &cm); err != nil { + return err + } + return cm.Data + }, timeout, interval).Should(Equal(map[string]string{ + "cert.crt": "--- LOKI CA CERT ---", + "other": "any", + })) + By("Expecting Kafka CA cert copied to privileged namespace") + Eventually(func() interface{} { + var cm v1.ConfigMap + if err := k8sClient.Get(ctx, types.NamespacedName{Name: kafkaCert.Name, Namespace: operatorNamespace + "-privileged"}, &cm); err != nil { + return err + } + return cm.Data + }, timeout, interval).Should(Equal(map[string]string{ + "cert.crt": "--- KAFKA CA CERT ---", + "other": "any", + })) + By("Expecting Kafka User cert copied to privileged namespace") + Eventually(func() interface{} { + var s v1.Secret + if err := k8sClient.Get(ctx, types.NamespacedName{Name: kafkaUserCert.Name, Namespace: operatorNamespace + "-privileged"}, &s); err != nil { + return err + } + return s.Data + }, timeout, interval).Should(Equal(map[string][]byte{ + "user.crt": []byte("--- KAFKA USER CERT ---"), + "user.key": []byte("--- KAFKA USER KEY ---"), + "other": []byte("any"), + })) + }) + + It("Should have all certificates mounted", func() { + By("Expecting Kafka certificates for Agent mounted") + Eventually(func() interface{} { + if err := k8sClient.Get(ctx, agentKey, &agent); err != nil { + return err + } + return agent.Spec.Template.Spec.Volumes + }, timeout, interval).Should(HaveLen(2)) + Expect(agent.Spec.Template.Annotations).To(HaveLen(2)) + Expect(agent.Spec.Template.Annotations["flows.netobserv.io/watched-kafka-ca"]).To(Equal(expectedKafkaHash)) + Expect(agent.Spec.Template.Annotations["flows.netobserv.io/watched-kafka-user"]).To(Equal(expectedKafkaUserHash)) + Expect(agent.Spec.Template.Spec.Volumes[0].Name).To(Equal("kafka-certs-ca")) + Expect(agent.Spec.Template.Spec.Volumes[1].Name).To(Equal("kafka-certs-user")) + lastAgentAnnots = agent.Spec.Template.Annotations + + By("Expecting Loki certificate for Plugin mounted") + Eventually(func() interface{} { + if 
err := k8sClient.Get(ctx, pluginKey, &plugin); err != nil { + return err + } + return plugin.Spec.Template.Spec.Volumes + }, timeout, interval).Should(HaveLen(4)) + Expect(plugin.Spec.Template.Annotations).To(HaveLen(1)) + Expect(plugin.Spec.Template.Spec.Volumes[0].Name).To(Equal("console-serving-cert")) + Expect(plugin.Spec.Template.Spec.Volumes[1].Name).To(Equal("config-volume")) + Expect(plugin.Spec.Template.Spec.Volumes[2].Name).To(Equal("loki-certs-ca")) + Expect(plugin.Spec.Template.Spec.Volumes[3].Name).To(Equal("loki-status-certs-ca")) + lastPluginAnnots = plugin.Spec.Template.Annotations + + By("Expecting Loki and Kafka certificates for FLP mounted") + Eventually(func() interface{} { + if err := k8sClient.Get(ctx, flpKey, &flp); err != nil { + return err + } + return flp.Spec.Template.Spec.Volumes + }, timeout, interval).Should(HaveLen(6)) + Expect(flp.Spec.Template.Annotations).To(HaveLen(6)) + Expect(flp.Spec.Template.Annotations["flows.netobserv.io/watched-kafka-ca"]).To(Equal(expectedKafkaHash)) + Expect(flp.Spec.Template.Annotations["flows.netobserv.io/watched-kafka-user"]).To(Equal(expectedKafkaUserHash)) + Expect(flp.Spec.Template.Annotations["flows.netobserv.io/watched-kafka-export-0-ca"]).To(Equal(expectedKafka2Hash)) + Expect(flp.Spec.Template.Spec.Volumes[0].Name).To(Equal("config-volume")) + Expect(flp.Spec.Template.Spec.Volumes[1].Name).To(Equal("kafka-cert-ca")) + Expect(flp.Spec.Template.Spec.Volumes[2].Name).To(Equal("kafka-cert-user")) + Expect(flp.Spec.Template.Spec.Volumes[3].Name).To(Equal("flowlogs-pipeline")) // token + Expect(flp.Spec.Template.Spec.Volumes[4].Name).To(Equal("loki-certs-ca")) + Expect(flp.Spec.Template.Spec.Volumes[5].Name).To(Equal("kafka-export-0-ca")) + lastFLPAnnots = flp.Spec.Template.Annotations + }) + }) + + Context("Updating Kafka certificates", func() { + It("Should update Kafka certificate", func() { + By("Updating Kafka CA certificate") + kafkaCert.Data["cert.crt"] = "--- KAFKA CA CERT MODIFIED ---" + 
Eventually(func() interface{} { return k8sClient.Update(ctx, &kafkaCert) }, timeout, interval).Should(Succeed()) + By("Updating Kafka User certificate") + kafkaUserCert.Data["user.crt"] = []byte("--- KAFKA USER CERT MODIFIED ---") + Eventually(func() interface{} { return k8sClient.Update(ctx, &kafkaUserCert) }, timeout, interval).Should(Succeed()) + }) + + It("Should copy certificates when necessary", func() { + By("Expecting Kafka CA cert updated to privileged namespace") + Eventually(func() interface{} { + var cm v1.ConfigMap + if err := k8sClient.Get(ctx, types.NamespacedName{Name: kafkaCert.Name, Namespace: operatorNamespace + "-privileged"}, &cm); err != nil { + return err + } + return cm.Data + }, timeout, interval).Should(Equal(map[string]string{ + "cert.crt": "--- KAFKA CA CERT MODIFIED ---", + "other": "any", + })) + By("Expecting Kafka User cert updated to privileged namespace") + Eventually(func() interface{} { + var s v1.Secret + if err := k8sClient.Get(ctx, types.NamespacedName{Name: kafkaUserCert.Name, Namespace: operatorNamespace + "-privileged"}, &s); err != nil { + return err + } + return s.Data + }, timeout, interval).Should(Equal(map[string][]byte{ + "user.crt": []byte("--- KAFKA USER CERT MODIFIED ---"), + "user.key": []byte("--- KAFKA USER KEY ---"), + "other": []byte("any"), + })) + }) + + It("Should redeploy eBPF Agent", func() { + Eventually(func() interface{} { + if err := k8sClient.Get(ctx, agentKey, &agent); err != nil { + return err + } + return agent.Spec.Template.Annotations + }, timeout, interval).Should(Not(Equal(lastAgentAnnots))) + lastAgentAnnots = agent.Spec.Template.Annotations + }) + + It("Should redeploy FLP", func() { + Eventually(func() interface{} { + if err := k8sClient.Get(ctx, flpKey, &flp); err != nil { + return err + } + return flp.Spec.Template.Annotations + }, timeout, interval).Should(Not(Equal(lastFLPAnnots))) + lastFLPAnnots = flp.Spec.Template.Annotations + }) + }) + + Context("Updating Loki certificate", func() 
{ + It("Should update Loki certificate", func() { + By("Updating Loki CA certificate") + lokiCert.Data["cert.crt"] = "--- LOKI CA CERT MODIFIED ---" + Eventually(func() interface{} { return k8sClient.Update(ctx, &lokiCert) }, timeout, interval).Should(Succeed()) + }) + + It("Should copy certificates when necessary", func() { + By("Expecting Loki CA cert updated to operator namespace") + Eventually(func() interface{} { + var cm v1.ConfigMap + if err := k8sClient.Get(ctx, types.NamespacedName{Name: lokiCert.Name, Namespace: operatorNamespace}, &cm); err != nil { + return err + } + return cm.Data + }, timeout, interval).Should(Equal(map[string]string{ + "cert.crt": "--- LOKI CA CERT MODIFIED ---", + "other": "any", + })) + }) + + // Console plugin is not restarted, as Loki certificate is always read from file + It("Should not redeploy Console plugin", func() { + Eventually(func() interface{} { + if err := k8sClient.Get(ctx, pluginKey, &plugin); err != nil { + return err + } + return plugin.Spec.Template.Annotations + }, timeout, interval).Should(Equal(lastPluginAnnots)) + lastPluginAnnots = plugin.Spec.Template.Annotations + }) + + // FLP is not restarted, as Loki certificate is always read from file + It("Should not redeploy FLP", func() { + Eventually(func() interface{} { + if err := k8sClient.Get(ctx, flpKey, &flp); err != nil { + return err + } + return flp.Spec.Template.Annotations + }, timeout, interval).Should(Equal(lastFLPAnnots)) + lastFLPAnnots = flp.Spec.Template.Annotations + }) + }) + + Context("Dummy update of Kafka Secret/CM without cert change", func() { + It("Should update Kafka Secret/CM", func() { + By("Updating Kafka CM") + kafkaCert.Annotations = map[string]string{"hey": "new annotation"} + kafkaCert.Data["other"] = "any MODIFIED" + Eventually(func() interface{} { return k8sClient.Update(ctx, &kafkaCert) }, timeout, interval).Should(Succeed()) + By("Updating Kafka Secret") + kafkaUserCert.Annotations = map[string]string{"ho": "new annotation"} + 
kafkaUserCert.Data["other"] = []byte("any MODIFIED") + Eventually(func() interface{} { return k8sClient.Update(ctx, &kafkaUserCert) }, timeout, interval).Should(Succeed()) + }) + + It("Should not redeploy Agent", func() { + Eventually(func() interface{} { + if err := k8sClient.Get(ctx, agentKey, &agent); err != nil { + return err + } + return agent.Spec.Template.Annotations + }, timeout, interval).Should(Equal(lastAgentAnnots)) + lastAgentAnnots = agent.Spec.Template.Annotations + }) + + It("Should not redeploy FLP", func() { + Eventually(func() interface{} { + if err := k8sClient.Get(ctx, flpKey, &flp); err != nil { + return err + } + return flp.Spec.Template.Annotations + }, timeout, interval).Should(Equal(lastFLPAnnots)) + lastFLPAnnots = flp.Spec.Template.Annotations + }) + }) + + Context("Cleanup", func() { + // Retrieve CR to get its UID + flowCR := flowslatest.FlowCollector{} + It("Should get CR", func() { + Eventually(func() error { + return k8sClient.Get(ctx, crKey, &flowCR) + }, timeout, interval).Should(Succeed()) + }) + + It("Should delete CR", func() { + Eventually(func() error { + return k8sClient.Delete(ctx, &flowCR) + }, timeout, interval).Should(Succeed()) + }) + + It("Should be garbage collected", func() { + By("Expecting flowlogs-pipeline deployment to be garbage collected") + Eventually(func() interface{} { + d := appsv1.Deployment{} + _ = k8sClient.Get(ctx, flpKey, &d) + return &d + }, timeout, interval).Should(BeGarbageCollectedBy(&flowCR)) + + By("Expecting agent daemonset to be garbage collected") + Eventually(func() interface{} { + d := appsv1.DaemonSet{} + _ = k8sClient.Get(ctx, agentKey, &d) + return &d + }, timeout, interval).Should(BeGarbageCollectedBy(&flowCR)) + + By("Expecting console plugin deployment to be garbage collected") + Eventually(func() interface{} { + d := appsv1.Deployment{} + _ = k8sClient.Get(ctx, pluginKey, &d) + return &d + }, timeout, interval).Should(BeGarbageCollectedBy(&flowCR)) + }) + + It("Should not get CR", 
func() { + Eventually(func() bool { + err := k8sClient.Get(ctx, crKey, &flowCR) + return errors.IsNotFound(err) + }, timeout, interval).Should(BeTrue()) + }) + }) +} diff --git a/controllers/flowcollector_controller_iso_test.go b/controllers/flowcollector_controller_iso_test.go index 2e394b01e..d8c209ae5 100644 --- a/controllers/flowcollector_controller_iso_test.go +++ b/controllers/flowcollector_controller_iso_test.go @@ -5,17 +5,13 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" ascv2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" - "github.com/netobserv/network-observability-operator/controllers/constants" - . "github.com/netobserv/network-observability-operator/controllers/controllerstest" ) // nolint:cyclop @@ -24,14 +20,6 @@ func flowCollectorIsoSpecs() { crKey := types.NamespacedName{ Name: "cluster", } - flpKey := types.NamespacedName{ - Name: constants.FLPName, - Namespace: operatorNamespace, - } - cpKey := types.NamespacedName{ - Name: "netobserv-plugin", - Namespace: operatorNamespace, - } BeforeEach(func() { // Add any setup steps that needs to be executed before each test @@ -205,7 +193,7 @@ func flowCollectorIsoSpecs() { }) It("Should not have modified input CR values", func() { - cr := GetReadyCR(crKey) + cr := GetCR(crKey) // For easier debugging, we check CR parts one by one Expect(cr.Spec.Processor).Should(Equal(specInput.Processor)) @@ -234,59 +222,5 @@ func flowCollectorIsoSpecs() { return k8sClient.Delete(ctx, &flowCR) }, timeout, interval).Should(Succeed()) }) - - It("Should be garbage collected", func() { - By("Expecting flowlogs-pipeline daemonset to be garbage collected") - Eventually(func() interface{} { - d := appsv1.DaemonSet{} - _ = k8sClient.Get(ctx, flpKey, &d) - 
return &d - }, timeout, interval).Should(BeGarbageCollectedBy(&flowCR)) - - By("Expecting flowlogs-pipeline service account to be garbage collected") - Eventually(func() interface{} { - svcAcc := v1.ServiceAccount{} - _ = k8sClient.Get(ctx, flpKey, &svcAcc) - return &svcAcc - }, timeout, interval).Should(BeGarbageCollectedBy(&flowCR)) - - By("Expecting console plugin deployment to be garbage collected") - Eventually(func() interface{} { - d := appsv1.Deployment{} - _ = k8sClient.Get(ctx, cpKey, &d) - return &d - }, timeout, interval).Should(BeGarbageCollectedBy(&flowCR)) - - By("Expecting console plugin service to be garbage collected") - Eventually(func() interface{} { - svc := v1.Service{} - _ = k8sClient.Get(ctx, cpKey, &svc) - return &svc - }, timeout, interval).Should(BeGarbageCollectedBy(&flowCR)) - - By("Expecting console plugin service account to be garbage collected") - Eventually(func() interface{} { - svcAcc := v1.ServiceAccount{} - _ = k8sClient.Get(ctx, cpKey, &svcAcc) - return &svcAcc - }, timeout, interval).Should(BeGarbageCollectedBy(&flowCR)) - - By("Expecting flowlogs-pipeline configmap to be garbage collected") - Eventually(func() interface{} { - cm := v1.ConfigMap{} - _ = k8sClient.Get(ctx, types.NamespacedName{ - Name: "flowlogs-pipeline-config", - Namespace: operatorNamespace, - }, &cm) - return &cm - }, timeout, interval).Should(BeGarbageCollectedBy(&flowCR)) - }) - - It("Should not get CR", func() { - Eventually(func() bool { - err := k8sClient.Get(ctx, crKey, &flowCR) - return errors.IsNotFound(err) - }, timeout, interval).Should(BeTrue()) - }) }) } diff --git a/controllers/flowcollector_controller_test.go b/controllers/flowcollector_controller_test.go index 18bee459e..c346bdd3a 100644 --- a/controllers/flowcollector_controller_test.go +++ b/controllers/flowcollector_controller_test.go @@ -1,7 +1,6 @@ package controllers import ( - "errors" "fmt" "strings" "time" @@ -14,7 +13,6 @@ import ( v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" 
kerr "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" @@ -23,7 +21,6 @@ import ( "github.com/netobserv/network-observability-operator/controllers/constants" . "github.com/netobserv/network-observability-operator/controllers/controllerstest" "github.com/netobserv/network-observability-operator/controllers/flowlogspipeline" - "github.com/netobserv/network-observability-operator/pkg/conditions" ) const ( @@ -611,7 +608,7 @@ func flowCollectorControllerSpecs() { fc.Spec.Loki.TLS = flowslatest.ClientTLS{ Enable: true, CACert: flowslatest.CertificateReference{ - Type: flowslatest.CertRefTypeConfigMap, + Type: flowslatest.RefTypeConfigMap, Name: "loki-ca", CertFile: "ca.crt", }, @@ -796,27 +793,16 @@ func flowCollectorControllerSpecs() { }) } -func GetReadyCR(key types.NamespacedName) *flowslatest.FlowCollector { +func GetCR(key types.NamespacedName) *flowslatest.FlowCollector { cr := flowslatest.FlowCollector{} Eventually(func() error { - err := k8sClient.Get(ctx, key, &cr) - if err != nil { - return err - } - cond := meta.FindStatusCondition(cr.Status.Conditions, conditions.TypeReady) - if cond == nil { - return errors.New("CR is not ready: condition is nil") - } - if cond.Status == metav1.ConditionFalse { - return fmt.Errorf("CR is not ready: %s - %v", cond.Reason, cond.Message) - } - return nil + return k8sClient.Get(ctx, key, &cr) }).Should(Succeed()) return &cr } func UpdateCR(key types.NamespacedName, updater func(*flowslatest.FlowCollector)) { - cr := GetReadyCR(key) + cr := GetCR(key) Eventually(func() error { updater(cr) return k8sClient.Update(ctx, cr) diff --git a/controllers/flowlogspipeline/flp_common_objects.go b/controllers/flowlogspipeline/flp_common_objects.go index 91cee6a47..2876db1b5 100644 --- a/controllers/flowlogspipeline/flp_common_objects.go +++ b/controllers/flowlogspipeline/flp_common_objects.go @@ -22,8 +22,10 @@ import 
( flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" "github.com/netobserv/network-observability-operator/controllers/constants" + "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/filters" "github.com/netobserv/network-observability-operator/pkg/helper" + "github.com/netobserv/network-observability-operator/pkg/volumes" ) const ( @@ -31,9 +33,6 @@ const ( configPath = "/etc/flowlogs-pipeline" configFile = "config.json" metricsConfigDir = "metrics_definitions" - kafkaCerts = "kafka-certs" - lokiCerts = "loki-certs" - promCerts = "prom-certs" lokiToken = "loki-token" healthServiceName = "health" prometheusServiceName = "prometheus" @@ -71,18 +70,17 @@ var FlpConfSuffix = map[ConfKind]string{ } type builder struct { - namespace string - labels map[string]string - selector map[string]string - desired *flowslatest.FlowCollectorSpec - promTLS *flowslatest.CertificateReference - confKind ConfKind - useOpenShiftSCC bool - image string + info *reconcilers.Instance + labels map[string]string + selector map[string]string + desired *flowslatest.FlowCollectorSpec + promTLS *flowslatest.CertificateReference + confKind ConfKind + volumes volumes.Builder } -func newBuilder(ns, image string, desired *flowslatest.FlowCollectorSpec, ck ConfKind, useOpenShiftSCC bool) builder { - version := helper.ExtractVersion(image) +func newBuilder(info *reconcilers.Instance, desired *flowslatest.FlowCollectorSpec, ck ConfKind) builder { + version := helper.ExtractVersion(info.Image) name := name(ck) var promTLS flowslatest.CertificateReference switch desired.Processor.Metrics.Server.TLS.Type { @@ -97,7 +95,7 @@ func newBuilder(ns, image string, desired *flowslatest.FlowCollectorSpec, ck Con } } return builder{ - namespace: ns, + info: info, labels: map[string]string{ "app": name, "version": helper.MaxLabelLength(version), @@ -105,11 +103,9 @@ func newBuilder(ns, image string, desired 
*flowslatest.FlowCollectorSpec, ck Con selector: map[string]string{ "app": name, }, - desired: desired, - confKind: ck, - useOpenShiftSCC: useOpenShiftSCC, - promTLS: &promTLS, - image: image, + desired: desired, + confKind: ck, + promTLS: &promTLS, } } @@ -133,7 +129,7 @@ func (b *builder) portProtocol() corev1.Protocol { return corev1.ProtocolUDP } -func (b *builder) podTemplate(hasHostPort, hasLokiInterface, hostNetwork bool, configDigest string) corev1.PodTemplateSpec { +func (b *builder) podTemplate(hasHostPort, hasLokiInterface, hostNetwork bool, annotations map[string]string) corev1.PodTemplateSpec { var ports []corev1.ContainerPort var tolerations []corev1.Toleration if hasHostPort { @@ -166,11 +162,11 @@ func (b *builder) podTemplate(hasHostPort, hasLokiInterface, hostNetwork bool, c }) } - volumeMounts := []corev1.VolumeMount{{ + volumeMounts := b.volumes.AppendMounts([]corev1.VolumeMount{{ MountPath: configPath, Name: configVolume, - }} - volumes := []corev1.Volume{{ + }}) + volumes := b.volumes.AppendVolumes([]corev1.Volume{{ Name: configVolume, VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ @@ -179,24 +175,7 @@ func (b *builder) podTemplate(hasHostPort, hasLokiInterface, hostNetwork bool, c }, }, }, - }} - - if helper.UseKafka(b.desired) && b.desired.Kafka.TLS.Enable { - volumes, volumeMounts = helper.AppendCertVolumes(volumes, volumeMounts, &b.desired.Kafka.TLS, kafkaCerts) - } - - if hasLokiInterface { - if b.desired.Loki.TLS.Enable && !b.desired.Loki.TLS.InsecureSkipVerify { - volumes, volumeMounts = helper.AppendCertVolumes(volumes, volumeMounts, &b.desired.Loki.TLS, lokiCerts) - } - if helper.LokiUseHostToken(&b.desired.Loki) || helper.LokiForwardUserToken(&b.desired.Loki) { - volumes, volumeMounts = helper.AppendTokenVolume(volumes, volumeMounts, lokiToken, constants.FLPName) - } - } - - if b.desired.Processor.Metrics.Server.TLS.Type != flowslatest.ServerTLSDisabled { - volumes, volumeMounts = 
helper.AppendSingleCertVolumes(volumes, volumeMounts, b.promTLS, promCerts) - } + }}) var envs []corev1.EnvVar // we need to sort env map to keep idempotency, @@ -207,7 +186,7 @@ func (b *builder) podTemplate(hasHostPort, hasLokiInterface, hostNetwork bool, c container := corev1.Container{ Name: constants.FLPName, - Image: b.image, + Image: b.info.Image, ImagePullPolicy: corev1.PullPolicy(b.desired.Processor.ImagePullPolicy), Args: []string{fmt.Sprintf(`--config=%s/%s`, configPath, configFile)}, Resources: *b.desired.Processor.Resources.DeepCopy(), @@ -242,15 +221,13 @@ func (b *builder) podTemplate(hasHostPort, hasLokiInterface, hostNetwork bool, c if hostNetwork { dnsPolicy = corev1.DNSClusterFirstWithHostNet } + annotations["prometheus.io/scrape"] = "true" + annotations["prometheus.io/scrape_port"] = fmt.Sprint(b.desired.Processor.Metrics.Server.Port) return corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: b.labels, - Annotations: map[string]string{ - constants.PodConfigurationDigest: configDigest, - "prometheus.io/scrape": "true", - "prometheus.io/scrape_port": fmt.Sprint(b.desired.Processor.Metrics.Server.Port), - }, + Labels: b.labels, + Annotations: annotations, }, Spec: corev1.PodSpec{ Tolerations: tolerations, @@ -463,9 +440,10 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) (*corev var authorization *promConfig.Authorization if helper.LokiUseHostToken(&b.desired.Loki) || helper.LokiForwardUserToken(&b.desired.Loki) { + b.volumes.AddToken(constants.FLPName) authorization = &promConfig.Authorization{ Type: "Bearer", - CredentialsFile: helper.TokensPath + constants.FLPName, + CredentialsFile: constants.TokensPath + constants.FLPName, } } @@ -478,10 +456,11 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) (*corev }, } } else { + caPath := b.volumes.AddCACertificate(&b.desired.Loki.TLS, "loki-certs") lokiWrite.ClientConfig = &promConfig.HTTPClientConfig{ Authorization: authorization, 
TLSConfig: promConfig.TLSConfig{ - CAFile: helper.GetCACertPath(&b.desired.Loki.TLS, lokiCerts), + CAFile: caPath, }, } } @@ -533,10 +512,11 @@ func (b *builder) makeMetricsDashboardConfigMap(dashboard string) *corev1.Config } return &configMap } + func (b *builder) addCustomExportStages(enrichedStage *config.PipelineBuilderStage) { for i, exporter := range b.desired.Exporters { if exporter.Type == flowslatest.KafkaExporter { - createKafkaWriteStage(fmt.Sprintf("kafka-export-%d", i), &exporter.Kafka, enrichedStage) + b.createKafkaWriteStage(fmt.Sprintf("kafka-export-%d", i), &exporter.Kafka, enrichedStage) } if exporter.Type == flowslatest.IpfixExporter { createIPFIXWriteStage(fmt.Sprintf("IPFIX-export-%d", i), &exporter.IPFIX, enrichedStage) @@ -544,11 +524,11 @@ func (b *builder) addCustomExportStages(enrichedStage *config.PipelineBuilderSta } } -func createKafkaWriteStage(name string, spec *flowslatest.FlowCollectorKafka, fromStage *config.PipelineBuilderStage) config.PipelineBuilderStage { +func (b *builder) createKafkaWriteStage(name string, spec *flowslatest.FlowCollectorKafka, fromStage *config.PipelineBuilderStage) config.PipelineBuilderStage { return fromStage.EncodeKafka(name, api.EncodeKafka{ Address: spec.Address, Topic: spec.Topic, - TLS: getKafkaTLS(&spec.TLS), + TLS: b.getKafkaTLS(&spec.TLS, name), }) } @@ -561,13 +541,14 @@ func createIPFIXWriteStage(name string, spec *flowslatest.FlowCollectorIPFIXRece }) } -func getKafkaTLS(tls *flowslatest.ClientTLS) *api.ClientTLS { +func (b *builder) getKafkaTLS(tls *flowslatest.ClientTLS, volumeName string) *api.ClientTLS { if tls.Enable { + caPath, userCertPath, userKeyPath := b.volumes.AddMutualTLSCertificates(tls, volumeName) return &api.ClientTLS{ InsecureSkipVerify: tls.InsecureSkipVerify, - CACertPath: helper.GetCACertPath(tls, kafkaCerts), - UserCertPath: helper.GetUserCertPath(tls, kafkaCerts), - UserKeyPath: helper.GetUserKeyPath(tls, kafkaCerts), + CACertPath: caPath, + UserCertPath: userCertPath, + 
UserKeyPath: userKeyPath, } } return nil @@ -582,9 +563,10 @@ func (b *builder) configMap(stages []config.Stage, parameters []config.StagePara NoPanic: true, } if b.desired.Processor.Metrics.Server.TLS.Type != flowslatest.ServerTLSDisabled { + cert, key := b.volumes.AddCertificate(b.promTLS, "prom-certs") metricsSettings.TLS = &api.PromTLSConf{ - CertPath: helper.GetSingleCertPath(b.promTLS, promCerts), - KeyPath: helper.GetSingleKeyPath(b.promTLS, promCerts), + CertPath: cert, + KeyPath: key, } } config := map[string]interface{}{ @@ -611,7 +593,7 @@ func (b *builder) configMap(stages []config.Stage, parameters []config.StagePara configMap := corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: b.configMapName(), - Namespace: b.namespace, + Namespace: b.info.Namespace, Labels: b.labels, }, Data: map[string]string{ @@ -628,7 +610,7 @@ func (b *builder) promService() *corev1.Service { svc := corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: b.promServiceName(), - Namespace: b.namespace, + Namespace: b.info.Namespace, Labels: b.labels, }, Spec: corev1.ServiceSpec{ @@ -656,7 +638,7 @@ func (b *builder) serviceAccount() *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: b.name(), - Namespace: b.namespace, + Namespace: b.info.Namespace, Labels: map[string]string{ "app": b.name(), }, @@ -686,7 +668,7 @@ func (b *builder) clusterRoleBinding(ck ConfKind, mono bool) *rbacv1.ClusterRole Subjects: []rbacv1.Subject{{ Kind: "ServiceAccount", Name: b.name(), - Namespace: b.namespace, + Namespace: b.info.Namespace, }}, } } @@ -695,7 +677,7 @@ func (b *builder) serviceMonitor() *monitoringv1.ServiceMonitor { flpServiceMonitorObject := monitoringv1.ServiceMonitor{ ObjectMeta: metav1.ObjectMeta{ Name: b.serviceMonitorName(), - Namespace: b.namespace, + Namespace: b.info.Namespace, Labels: b.labels, }, Spec: monitoringv1.ServiceMonitorSpec{ @@ -708,7 +690,7 @@ func (b *builder) serviceMonitor() *monitoringv1.ServiceMonitor { }, 
NamespaceSelector: monitoringv1.NamespaceSelector{ MatchNames: []string{ - b.namespace, + b.info.Namespace, }, }, Selector: metav1.LabelSelector{ @@ -769,7 +751,7 @@ func (b *builder) prometheusRule() *monitoringv1.PrometheusRule { ObjectMeta: metav1.ObjectMeta{ Name: b.prometheusRuleName(), Labels: b.labels, - Namespace: b.namespace, + Namespace: b.info.Namespace, }, Spec: monitoringv1.PrometheusRuleSpec{ Groups: []monitoringv1.RuleGroup{ diff --git a/controllers/flowlogspipeline/flp_ingest_objects.go b/controllers/flowlogspipeline/flp_ingest_objects.go index b6d997dc2..45fd09eb1 100644 --- a/controllers/flowlogspipeline/flp_ingest_objects.go +++ b/controllers/flowlogspipeline/flp_ingest_objects.go @@ -9,6 +9,7 @@ import ( "github.com/netobserv/flowlogs-pipeline/pkg/api" "github.com/netobserv/flowlogs-pipeline/pkg/config" flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) @@ -16,19 +17,19 @@ type ingestBuilder struct { generic builder } -func newIngestBuilder(ns, image string, desired *flowslatest.FlowCollectorSpec, useOpenShiftSCC bool) ingestBuilder { - gen := newBuilder(ns, image, desired, ConfKafkaIngester, useOpenShiftSCC) +func newIngestBuilder(info *reconcilers.Instance, desired *flowslatest.FlowCollectorSpec) ingestBuilder { + gen := newBuilder(info, desired, ConfKafkaIngester) return ingestBuilder{ generic: gen, } } -func (b *ingestBuilder) daemonSet(configDigest string) *appsv1.DaemonSet { - pod := b.generic.podTemplate(true /*listens*/, false /*loki itf*/, !b.generic.useOpenShiftSCC, configDigest) +func (b *ingestBuilder) daemonSet(annotations map[string]string) *appsv1.DaemonSet { + pod := b.generic.podTemplate(true /*listens*/, false /*loki itf*/, !b.generic.info.UseOpenShiftSCC, annotations) return &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: b.generic.name(), - 
Namespace: b.generic.namespace, + Namespace: b.generic.info.Namespace, Labels: b.generic.labels, }, Spec: appsv1.DaemonSetSpec{ @@ -63,7 +64,7 @@ func (b *ingestBuilder) buildPipelineConfig() ([]config.Stage, []config.StagePar }) } - pipeline = createKafkaWriteStage("kafka-write", &b.generic.desired.Kafka, &pipeline) + pipeline = b.generic.createKafkaWriteStage("kafka-write", &b.generic.desired.Kafka, &pipeline) return pipeline.GetStages(), pipeline.GetStageParams(), nil } diff --git a/controllers/flowlogspipeline/flp_ingest_reconciler.go b/controllers/flowlogspipeline/flp_ingest_reconciler.go index 9b096cde3..dbc714f05 100644 --- a/controllers/flowlogspipeline/flp_ingest_reconciler.go +++ b/controllers/flowlogspipeline/flp_ingest_reconciler.go @@ -19,7 +19,7 @@ import ( // flpIngesterReconciler reconciles the current flowlogs-pipeline-ingester state with the desired configuration type flpIngesterReconciler struct { singleReconciler - reconcilersCommonInfo + *reconcilers.Instance owned ingestOwnedObjects } @@ -33,7 +33,7 @@ type ingestOwnedObjects struct { prometheusRule *monitoringv1.PrometheusRule } -func newIngesterReconciler(info *reconcilersCommonInfo) *flpIngesterReconciler { +func newIngesterReconciler(cmn *reconcilers.Instance) *flpIngesterReconciler { name := name(ConfKafkaIngester) owned := ingestOwnedObjects{ daemonSet: &appsv1.DaemonSet{}, @@ -44,21 +44,21 @@ func newIngesterReconciler(info *reconcilersCommonInfo) *flpIngesterReconciler { serviceMonitor: &monitoringv1.ServiceMonitor{}, prometheusRule: &monitoringv1.PrometheusRule{}, } - info.nobjMngr.AddManagedObject(name, owned.daemonSet) - info.nobjMngr.AddManagedObject(name, owned.serviceAccount) - info.nobjMngr.AddManagedObject(promServiceName(ConfKafkaIngester), owned.promService) - info.nobjMngr.AddManagedObject(RoleBindingName(ConfKafkaIngester), owned.roleBinding) - info.nobjMngr.AddManagedObject(configMapName(ConfKafkaIngester), owned.configMap) - if info.availableAPIs.HasSvcMonitor() { - 
info.nobjMngr.AddManagedObject(serviceMonitorName(ConfKafkaIngester), owned.serviceMonitor) + cmn.Managed.AddManagedObject(name, owned.daemonSet) + cmn.Managed.AddManagedObject(name, owned.serviceAccount) + cmn.Managed.AddManagedObject(promServiceName(ConfKafkaIngester), owned.promService) + cmn.Managed.AddManagedObject(RoleBindingName(ConfKafkaIngester), owned.roleBinding) + cmn.Managed.AddManagedObject(configMapName(ConfKafkaIngester), owned.configMap) + if cmn.AvailableAPIs.HasSvcMonitor() { + cmn.Managed.AddManagedObject(serviceMonitorName(ConfKafkaIngester), owned.serviceMonitor) } - if info.availableAPIs.HasPromRule() { - info.nobjMngr.AddManagedObject(prometheusRuleName(ConfKafkaIngester), owned.prometheusRule) + if cmn.AvailableAPIs.HasPromRule() { + cmn.Managed.AddManagedObject(prometheusRuleName(ConfKafkaIngester), owned.prometheusRule) } return &flpIngesterReconciler{ - reconcilersCommonInfo: *info, - owned: owned, + Instance: cmn, + owned: owned, } } @@ -69,28 +69,31 @@ func (r *flpIngesterReconciler) context(ctx context.Context) context.Context { // cleanupNamespace cleans up old namespace func (r *flpIngesterReconciler) cleanupNamespace(ctx context.Context) { - r.nobjMngr.CleanupPreviousNamespace(ctx) + r.Managed.CleanupPreviousNamespace(ctx) } func (r *flpIngesterReconciler) reconcile(ctx context.Context, desired *flowslatest.FlowCollector) error { // Retrieve current owned objects - err := r.nobjMngr.FetchAll(ctx) + err := r.Managed.FetchAll(ctx) if err != nil { return err } // Ingester only used with Kafka and without eBPF if !helper.UseKafka(&desired.Spec) || helper.UseEBPF(&desired.Spec) { - r.nobjMngr.TryDeleteAll(ctx) + r.Managed.TryDeleteAll(ctx) return nil } - builder := newIngestBuilder(r.nobjMngr.Namespace, r.image, &desired.Spec, r.useOpenShiftSCC) + builder := newIngestBuilder(r.Instance, &desired.Spec) newCM, configDigest, err := builder.configMap() if err != nil { return err } - if !r.nobjMngr.Exists(r.owned.configMap) { + annotations 
:= map[string]string{ + constants.PodConfigurationDigest: configDigest, + } + if !r.Managed.Exists(r.owned.configMap) { if err := r.CreateOwned(ctx, newCM); err != nil { return err } @@ -109,25 +112,30 @@ func (r *flpIngesterReconciler) reconcile(ctx context.Context, desired *flowslat return err } - return r.reconcileDaemonSet(ctx, builder.daemonSet(configDigest)) + // Watch for Kafka certificate if necessary; need to restart pods in case of cert rotation + if err = annotateKafkaCerts(ctx, r.Common, &desired.Spec.Kafka, "kafka", annotations); err != nil { + return err + } + + return r.reconcileDaemonSet(ctx, builder.daemonSet(annotations)) } func (r *flpIngesterReconciler) reconcilePrometheusService(ctx context.Context, builder *ingestBuilder) error { report := helper.NewChangeReport("FLP prometheus service") defer report.LogIfNeeded(ctx) - if err := reconcilers.ReconcileService(ctx, r.nobjMngr, &r.ClientHelper, r.owned.promService, builder.promService(), &report); err != nil { + if err := r.ReconcileService(ctx, r.owned.promService, builder.promService(), &report); err != nil { return err } - if r.availableAPIs.HasSvcMonitor() { + if r.AvailableAPIs.HasSvcMonitor() { serviceMonitor := builder.generic.serviceMonitor() - if err := reconcilers.GenericReconcile(ctx, r.nobjMngr, &r.ClientHelper, r.owned.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { + if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.owned.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { return err } } - if r.availableAPIs.HasPromRule() { + if r.AvailableAPIs.HasPromRule() { promRules := builder.generic.prometheusRule() - if err := reconcilers.GenericReconcile(ctx, r.nobjMngr, &r.ClientHelper, r.owned.prometheusRule, promRules, &report, helper.PrometheusRuleChanged); err != nil { + if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.owned.prometheusRule, promRules, &report, 
helper.PrometheusRuleChanged); err != nil { return err } } @@ -138,7 +146,7 @@ func (r *flpIngesterReconciler) reconcileDaemonSet(ctx context.Context, desiredD report := helper.NewChangeReport("FLP DaemonSet") defer report.LogIfNeeded(ctx) - if !r.nobjMngr.Exists(r.owned.daemonSet) { + if !r.Managed.Exists(r.owned.daemonSet) { return r.CreateOwned(ctx, desiredDS) } else if helper.PodChanged(&r.owned.daemonSet.Spec.Template, &desiredDS.Spec.Template, constants.FLPName, &report) { return r.UpdateOwned(ctx, r.owned.daemonSet, desiredDS) @@ -150,17 +158,17 @@ func (r *flpIngesterReconciler) reconcileDaemonSet(ctx context.Context, desiredD } func (r *flpIngesterReconciler) reconcilePermissions(ctx context.Context, builder *ingestBuilder) error { - if !r.nobjMngr.Exists(r.owned.serviceAccount) { + if !r.Managed.Exists(r.owned.serviceAccount) { return r.CreateOwned(ctx, builder.serviceAccount()) } // We only configure name, update is not needed for now - cr := buildClusterRoleIngester(r.useOpenShiftSCC) + cr := buildClusterRoleIngester(r.UseOpenShiftSCC) if err := r.ReconcileClusterRole(ctx, cr); err != nil { return err } desired := builder.clusterRoleBinding() - if err := r.ClientHelper.ReconcileClusterRoleBinding(ctx, desired); err != nil { + if err := r.ReconcileClusterRoleBinding(ctx, desired); err != nil { return err } return nil diff --git a/controllers/flowlogspipeline/flp_monolith_objects.go b/controllers/flowlogspipeline/flp_monolith_objects.go index ecdc16421..50b6a3d0b 100644 --- a/controllers/flowlogspipeline/flp_monolith_objects.go +++ b/controllers/flowlogspipeline/flp_monolith_objects.go @@ -9,6 +9,7 @@ import ( "github.com/netobserv/flowlogs-pipeline/pkg/api" "github.com/netobserv/flowlogs-pipeline/pkg/config" flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) @@ -16,19 +17,19 @@ type 
monolithBuilder struct { generic builder } -func newMonolithBuilder(ns, image string, desired *flowslatest.FlowCollectorSpec, useOpenShiftSCC bool) monolithBuilder { - gen := newBuilder(ns, image, desired, ConfMonolith, useOpenShiftSCC) +func newMonolithBuilder(info *reconcilers.Instance, desired *flowslatest.FlowCollectorSpec) monolithBuilder { + gen := newBuilder(info, desired, ConfMonolith) return monolithBuilder{ generic: gen, } } -func (b *monolithBuilder) daemonSet(configDigest string) *appsv1.DaemonSet { - pod := b.generic.podTemplate(true /*listens*/, true /*loki itf*/, !b.generic.useOpenShiftSCC, configDigest) +func (b *monolithBuilder) daemonSet(annotations map[string]string) *appsv1.DaemonSet { + pod := b.generic.podTemplate(true /*listens*/, true /*loki itf*/, !b.generic.info.UseOpenShiftSCC, annotations) return &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: b.generic.name(), - Namespace: b.generic.namespace, + Namespace: b.generic.info.Namespace, Labels: b.generic.labels, }, Spec: appsv1.DaemonSetSpec{ diff --git a/controllers/flowlogspipeline/flp_monolith_reconciler.go b/controllers/flowlogspipeline/flp_monolith_reconciler.go index fd16d4b9a..f550eeff9 100644 --- a/controllers/flowlogspipeline/flp_monolith_reconciler.go +++ b/controllers/flowlogspipeline/flp_monolith_reconciler.go @@ -19,7 +19,7 @@ import ( // flpMonolithReconciler reconciles the current flowlogs-pipeline monolith state with the desired configuration type flpMonolithReconciler struct { singleReconciler - reconcilersCommonInfo + *reconcilers.Instance owned monolithOwnedObjects } @@ -34,7 +34,7 @@ type monolithOwnedObjects struct { prometheusRule *monitoringv1.PrometheusRule } -func newMonolithReconciler(info *reconcilersCommonInfo) *flpMonolithReconciler { +func newMonolithReconciler(cmn *reconcilers.Instance) *flpMonolithReconciler { name := name(ConfMonolith) owned := monolithOwnedObjects{ daemonSet: &appsv1.DaemonSet{}, @@ -46,22 +46,22 @@ func newMonolithReconciler(info 
*reconcilersCommonInfo) *flpMonolithReconciler { serviceMonitor: &monitoringv1.ServiceMonitor{}, prometheusRule: &monitoringv1.PrometheusRule{}, } - info.nobjMngr.AddManagedObject(name, owned.daemonSet) - info.nobjMngr.AddManagedObject(name, owned.serviceAccount) - info.nobjMngr.AddManagedObject(promServiceName(ConfMonolith), owned.promService) - info.nobjMngr.AddManagedObject(RoleBindingMonoName(ConfKafkaIngester), owned.roleBindingIn) - info.nobjMngr.AddManagedObject(RoleBindingMonoName(ConfKafkaTransformer), owned.roleBindingTr) - info.nobjMngr.AddManagedObject(configMapName(ConfMonolith), owned.configMap) - if info.availableAPIs.HasSvcMonitor() { - info.nobjMngr.AddManagedObject(serviceMonitorName(ConfMonolith), owned.serviceMonitor) + cmn.Managed.AddManagedObject(name, owned.daemonSet) + cmn.Managed.AddManagedObject(name, owned.serviceAccount) + cmn.Managed.AddManagedObject(promServiceName(ConfMonolith), owned.promService) + cmn.Managed.AddManagedObject(RoleBindingMonoName(ConfKafkaIngester), owned.roleBindingIn) + cmn.Managed.AddManagedObject(RoleBindingMonoName(ConfKafkaTransformer), owned.roleBindingTr) + cmn.Managed.AddManagedObject(configMapName(ConfMonolith), owned.configMap) + if cmn.AvailableAPIs.HasSvcMonitor() { + cmn.Managed.AddManagedObject(serviceMonitorName(ConfMonolith), owned.serviceMonitor) } - if info.availableAPIs.HasPromRule() { - info.nobjMngr.AddManagedObject(prometheusRuleName(ConfMonolith), owned.prometheusRule) + if cmn.AvailableAPIs.HasPromRule() { + cmn.Managed.AddManagedObject(prometheusRuleName(ConfMonolith), owned.prometheusRule) } return &flpMonolithReconciler{ - reconcilersCommonInfo: *info, - owned: owned, + Instance: cmn, + owned: owned, } } @@ -72,28 +72,31 @@ func (r *flpMonolithReconciler) context(ctx context.Context) context.Context { // cleanupNamespace cleans up old namespace func (r *flpMonolithReconciler) cleanupNamespace(ctx context.Context) { - r.nobjMngr.CleanupPreviousNamespace(ctx) + 
r.Managed.CleanupPreviousNamespace(ctx) } func (r *flpMonolithReconciler) reconcile(ctx context.Context, desired *flowslatest.FlowCollector) error { // Retrieve current owned objects - err := r.nobjMngr.FetchAll(ctx) + err := r.Managed.FetchAll(ctx) if err != nil { return err } // Monolith only used without Kafka if helper.UseKafka(&desired.Spec) { - r.nobjMngr.TryDeleteAll(ctx) + r.Managed.TryDeleteAll(ctx) return nil } - builder := newMonolithBuilder(r.nobjMngr.Namespace, r.image, &desired.Spec, r.useOpenShiftSCC) + builder := newMonolithBuilder(r.Instance, &desired.Spec) newCM, configDigest, dbConfigMap, err := builder.configMap() if err != nil { return err } - if !r.nobjMngr.Exists(r.owned.configMap) { + annotations := map[string]string{ + constants.PodConfigurationDigest: configDigest, + } + if !r.Managed.Exists(r.owned.configMap) { if err := r.CreateOwned(ctx, newCM); err != nil { return err } @@ -102,8 +105,8 @@ func (r *flpMonolithReconciler) reconcile(ctx context.Context, desired *flowslat return err } } - if r.reconcilersCommonInfo.availableAPIs.HasConsoleConfig() { - if err := r.reconcileDashboardConfig(ctx, dbConfigMap); err != nil { + if r.AvailableAPIs.HasConsoleConfig() { + if err := reconcileDashboardConfig(ctx, &r.Client, dbConfigMap); err != nil { return err } } @@ -117,25 +120,36 @@ func (r *flpMonolithReconciler) reconcile(ctx context.Context, desired *flowslat return err } - return r.reconcileDaemonSet(ctx, builder.daemonSet(configDigest)) + // Watch for Loki certificate if necessary; we'll ignore in that case the returned digest, as we don't need to restart pods on cert rotation + // because certificate is always reloaded from file + if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.TLS, r.Namespace); err != nil { + return err + } + + // Watch for Kafka exporter certificate if necessary; need to restart pods in case of cert rotation + if err = annotateKafkaExporterCerts(ctx, r.Common, desired.Spec.Exporters, annotations); 
err != nil { + return err + } + + return r.reconcileDaemonSet(ctx, builder.daemonSet(annotations)) } func (r *flpMonolithReconciler) reconcilePrometheusService(ctx context.Context, builder *monolithBuilder) error { report := helper.NewChangeReport("FLP prometheus service") defer report.LogIfNeeded(ctx) - if err := reconcilers.ReconcileService(ctx, r.nobjMngr, &r.ClientHelper, r.owned.promService, builder.promService(), &report); err != nil { + if err := r.ReconcileService(ctx, r.owned.promService, builder.promService(), &report); err != nil { return err } - if r.availableAPIs.HasSvcMonitor() { + if r.AvailableAPIs.HasSvcMonitor() { serviceMonitor := builder.generic.serviceMonitor() - if err := reconcilers.GenericReconcile(ctx, r.nobjMngr, &r.ClientHelper, r.owned.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { + if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.owned.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { return err } } - if r.availableAPIs.HasPromRule() { + if r.AvailableAPIs.HasPromRule() { promRules := builder.generic.prometheusRule() - if err := reconcilers.GenericReconcile(ctx, r.nobjMngr, &r.ClientHelper, r.owned.prometheusRule, promRules, &report, helper.PrometheusRuleChanged); err != nil { + if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.owned.prometheusRule, promRules, &report, helper.PrometheusRuleChanged); err != nil { return err } } @@ -146,7 +160,7 @@ func (r *flpMonolithReconciler) reconcileDaemonSet(ctx context.Context, desiredD report := helper.NewChangeReport("FLP DaemonSet") defer report.LogIfNeeded(ctx) - if !r.nobjMngr.Exists(r.owned.daemonSet) { + if !r.Managed.Exists(r.owned.daemonSet) { return r.CreateOwned(ctx, desiredDS) } else if helper.PodChanged(&r.owned.daemonSet.Spec.Template, &desiredDS.Spec.Template, constants.FLPName, &report) { return r.UpdateOwned(ctx, r.owned.daemonSet, desiredDS) @@ -158,11 +172,11 @@ func (r 
*flpMonolithReconciler) reconcileDaemonSet(ctx context.Context, desiredD } func (r *flpMonolithReconciler) reconcilePermissions(ctx context.Context, builder *monolithBuilder) error { - if !r.nobjMngr.Exists(r.owned.serviceAccount) { + if !r.Managed.Exists(r.owned.serviceAccount) { return r.CreateOwned(ctx, builder.serviceAccount()) } // We only configure name, update is not needed for now - cr := buildClusterRoleIngester(r.useOpenShiftSCC) + cr := buildClusterRoleIngester(r.UseOpenShiftSCC) if err := r.ReconcileClusterRole(ctx, cr); err != nil { return err } @@ -173,7 +187,7 @@ func (r *flpMonolithReconciler) reconcilePermissions(ctx context.Context, builde // Monolith uses ingester + transformer cluster roles for _, kind := range []ConfKind{ConfKafkaIngester, ConfKafkaTransformer} { desired := builder.clusterRoleBinding(kind) - if err := r.ClientHelper.ReconcileClusterRoleBinding(ctx, desired); err != nil { + if err := r.ReconcileClusterRoleBinding(ctx, desired); err != nil { return err } } diff --git a/controllers/flowlogspipeline/flp_reconciler.go b/controllers/flowlogspipeline/flp_reconciler.go index ad453aaca..3d2fd64bf 100644 --- a/controllers/flowlogspipeline/flp_reconciler.go +++ b/controllers/flowlogspipeline/flp_reconciler.go @@ -12,7 +12,8 @@ import ( flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" "github.com/netobserv/network-observability-operator/controllers/reconcilers" - "github.com/netobserv/network-observability-operator/pkg/discover" + "github.com/netobserv/network-observability-operator/pkg/helper" + "github.com/netobserv/network-observability-operator/pkg/watchers" ) // Type alias @@ -31,32 +32,12 @@ type singleReconciler interface { reconcile(ctx context.Context, desired *flowslatest.FlowCollector) error } -type reconcilersCommonInfo struct { - reconcilers.ClientHelper - nobjMngr *reconcilers.NamespacedObjectManager - useOpenShiftSCC bool - image string - availableAPIs *discover.AvailableAPIs -} - -func 
createCommonInfo(ctx context.Context, cl reconcilers.ClientHelper, ns, prevNS, image string, permissionsVendor *discover.Permissions, availableAPIs *discover.AvailableAPIs) *reconcilersCommonInfo { - nobjMngr := reconcilers.NewNamespacedObjectManager(cl, ns, prevNS) - openshift := permissionsVendor.Vendor(ctx) == discover.VendorOpenShift - return &reconcilersCommonInfo{ - ClientHelper: cl, - nobjMngr: nobjMngr, - useOpenShiftSCC: openshift, - image: image, - availableAPIs: availableAPIs, - } -} - -func NewReconciler(ctx context.Context, cl reconcilers.ClientHelper, ns, prevNS, image string, permissionsVendor *discover.Permissions, availableAPIs *discover.AvailableAPIs) FLPReconciler { +func NewReconciler(cmn *reconcilers.Common, image string) FLPReconciler { return FLPReconciler{ reconcilers: []singleReconciler{ - newMonolithReconciler(createCommonInfo(ctx, cl, ns, prevNS, image, permissionsVendor, availableAPIs)), - newTransformerReconciler(createCommonInfo(ctx, cl, ns, prevNS, image, permissionsVendor, availableAPIs)), - newIngesterReconciler(createCommonInfo(ctx, cl, ns, prevNS, image, permissionsVendor, availableAPIs)), + newMonolithReconciler(cmn.NewInstance(image)), + newTransformerReconciler(cmn.NewInstance(image)), + newIngesterReconciler(cmn.NewInstance(image)), }, } } @@ -90,10 +71,10 @@ func (r *FLPReconciler) Reconcile(ctx context.Context, desired *flowslatest.Flow return nil } -func (r *reconcilersCommonInfo) reconcileDashboardConfig(ctx context.Context, dbConfigMap *corev1.ConfigMap) error { +func reconcileDashboardConfig(ctx context.Context, cl *helper.Client, dbConfigMap *corev1.ConfigMap) error { if dbConfigMap == nil { // Dashboard config not desired => delete if exists - if err := r.Delete(ctx, &corev1.ConfigMap{ + if err := cl.Delete(ctx, &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: dashboardCMName, Namespace: dashboardCMNamespace, @@ -106,17 +87,42 @@ func (r *reconcilersCommonInfo) reconcileDashboardConfig(ctx context.Context, db 
return nil } curr := &corev1.ConfigMap{} - if err := r.Get(ctx, types.NamespacedName{ + if err := cl.Get(ctx, types.NamespacedName{ Name: dashboardCMName, Namespace: dashboardCMNamespace, }, curr); err != nil { if errors.IsNotFound(err) { - return r.CreateOwned(ctx, dbConfigMap) + return cl.CreateOwned(ctx, dbConfigMap) } return err } if !equality.Semantic.DeepDerivative(dbConfigMap.Data, curr.Data) { - return r.UpdateOwned(ctx, curr, dbConfigMap) + return cl.UpdateOwned(ctx, curr, dbConfigMap) + } + return nil +} + +func annotateKafkaExporterCerts(ctx context.Context, info *reconcilers.Common, exp []*flowslatest.FlowCollectorExporter, annotations map[string]string) error { + for i, exporter := range exp { + if exporter.Type == flowslatest.KafkaExporter { + if err := annotateKafkaCerts(ctx, info, &exporter.Kafka, fmt.Sprintf("kafka-export-%d", i), annotations); err != nil { + return err + } + } + } + return nil +} + +func annotateKafkaCerts(ctx context.Context, info *reconcilers.Common, spec *flowslatest.FlowCollectorKafka, prefix string, annotations map[string]string) error { + caDigest, userDigest, err := info.Watcher.ProcessMTLSCerts(ctx, info.Client, &spec.TLS, info.Namespace) + if err != nil { + return err + } + if caDigest != "" { + annotations[watchers.Annotation(prefix+"-ca")] = caDigest + } + if userDigest != "" { + annotations[watchers.Annotation(prefix+"-user")] = userDigest } return nil } diff --git a/controllers/flowlogspipeline/flp_test.go b/controllers/flowlogspipeline/flp_test.go index d7d915050..052e38b79 100644 --- a/controllers/flowlogspipeline/flp_test.go +++ b/controllers/flowlogspipeline/flp_test.go @@ -33,6 +33,7 @@ import ( flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" "github.com/netobserv/network-observability-operator/controllers/constants" + "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) @@ -150,23 +151,39 @@ 
func getAutoScalerSpecs() (ascv2.HorizontalPodAutoscaler, flowslatest.FlowCollec return autoScaler, getConfig().Processor.KafkaConsumerAutoscaler } +func monoBuilder(ns string, cfg *flowslatest.FlowCollectorSpec) monolithBuilder { + info := reconcilers.Common{Namespace: ns} + return newMonolithBuilder(info.NewInstance(image), cfg) +} + +func transfBuilder(ns string, cfg *flowslatest.FlowCollectorSpec) transfoBuilder { + info := reconcilers.Common{Namespace: ns} + return newTransfoBuilder(info.NewInstance(image), cfg) +} + +func annotate(digest string) map[string]string { + return map[string]string{ + constants.PodConfigurationDigest: digest, + } +} + func TestDaemonSetNoChange(t *testing.T) { assert := assert.New(t) // Get first ns := "namespace" cfg := getConfig() - b := newMonolithBuilder(ns, image, &cfg, true) + b := monoBuilder(ns, &cfg) _, digest, _, err := b.configMap() assert.NoError(err) - first := b.daemonSet(digest) + first := b.daemonSet(annotate(digest)) // Check no change cfg = getConfig() - b = newMonolithBuilder(ns, image, &cfg, true) + b = monoBuilder(ns, &cfg) _, digest, _, err = b.configMap() assert.NoError(err) - second := b.daemonSet(digest) + second := b.daemonSet(annotate(digest)) report := helper.NewChangeReport("") assert.False(helper.PodChanged(&first.Spec.Template, &second.Spec.Template, constants.FLPName, &report)) @@ -179,17 +196,17 @@ func TestDaemonSetChanged(t *testing.T) { // Get first ns := "namespace" cfg := getConfig() - b := newMonolithBuilder(ns, image, &cfg, true) + b := monoBuilder(ns, &cfg) _, digest, _, err := b.configMap() assert.NoError(err) - first := b.daemonSet(digest) + first := b.daemonSet(annotate(digest)) // Check probes enabled change cfg.Processor.EnableKubeProbes = pointer.Bool(true) - b = newMonolithBuilder(ns, image, &cfg, true) + b = monoBuilder(ns, &cfg) _, digest, _, err = b.configMap() assert.NoError(err) - second := b.daemonSet(digest) + second := b.daemonSet(annotate(digest)) report := 
helper.NewChangeReport("") assert.True(helper.PodChanged(&first.Spec.Template, &second.Spec.Template, constants.FLPName, &report)) @@ -197,7 +214,7 @@ func TestDaemonSetChanged(t *testing.T) { // Check probes DON'T change infinitely (bc DeepEqual/Derivative checks won't work there) assert.NoError(err) - secondBis := b.daemonSet(digest) + secondBis := b.daemonSet(annotate(digest)) secondBis.Spec.Template.Spec.Containers[0].LivenessProbe = &corev1.Probe{ FailureThreshold: 3, PeriodSeconds: 10, @@ -217,10 +234,10 @@ func TestDaemonSetChanged(t *testing.T) { // Check log level change cfg.Processor.LogLevel = "info" - b = newMonolithBuilder(ns, image, &cfg, true) + b = monoBuilder(ns, &cfg) _, digest, _, err = b.configMap() assert.NoError(err) - third := b.daemonSet(digest) + third := b.daemonSet(annotate(digest)) report = helper.NewChangeReport("") assert.True(helper.PodChanged(&second.Spec.Template, &third.Spec.Template, constants.FLPName, &report)) @@ -231,10 +248,10 @@ func TestDaemonSetChanged(t *testing.T) { corev1.ResourceCPU: resource.MustParse("500m"), corev1.ResourceMemory: resource.MustParse("500Gi"), } - b = newMonolithBuilder(ns, image, &cfg, true) + b = monoBuilder(ns, &cfg) _, digest, _, err = b.configMap() assert.NoError(err) - fourth := b.daemonSet(digest) + fourth := b.daemonSet(annotate(digest)) report = helper.NewChangeReport("") assert.True(helper.PodChanged(&third.Spec.Template, &fourth.Spec.Template, constants.FLPName, &report)) @@ -245,10 +262,10 @@ func TestDaemonSetChanged(t *testing.T) { corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("512Mi"), } - b = newMonolithBuilder(ns, image, &cfg, true) + b = monoBuilder(ns, &cfg) _, digest, _, err = b.configMap() assert.NoError(err) - fifth := b.daemonSet(digest) + fifth := b.daemonSet(annotate(digest)) report = helper.NewChangeReport("") assert.True(helper.PodChanged(&fourth.Spec.Template, &fifth.Spec.Template, constants.FLPName, &report)) @@ -266,10 +283,10 @@ 
func TestDaemonSetChanged(t *testing.T) { CertFile: "ca.crt", }, } - b = newMonolithBuilder(ns, image, &cfg, true) + b = monoBuilder(ns, &cfg) _, digest, _, err = b.configMap() assert.NoError(err) - sixth := b.daemonSet(digest) + sixth := b.daemonSet(annotate(digest)) report = helper.NewChangeReport("") assert.True(helper.PodChanged(&fifth.Spec.Template, &sixth.Spec.Template, constants.FLPName, &report)) @@ -284,10 +301,10 @@ func TestDaemonSetChanged(t *testing.T) { CertFile: "ca.crt", }, } - b = newMonolithBuilder(ns, image, &cfg, true) + b = monoBuilder(ns, &cfg) _, digest, _, err = b.configMap() assert.NoError(err) - seventh := b.daemonSet(digest) + seventh := b.daemonSet(annotate(digest)) report = helper.NewChangeReport("") assert.True(helper.PodChanged(&sixth.Spec.Template, &seventh.Spec.Template, constants.FLPName, &report)) @@ -300,17 +317,17 @@ func TestDeploymentNoChange(t *testing.T) { // Get first ns := "namespace" cfg := getConfig() - b := newTransfoBuilder(ns, image, &cfg, true) + b := transfBuilder(ns, &cfg) _, digest, _, err := b.configMap() assert.NoError(err) - first := b.deployment(digest) + first := b.deployment(annotate(digest)) // Check no change cfg = getConfig() - b = newTransfoBuilder(ns, image, &cfg, true) + b = transfBuilder(ns, &cfg) _, digest, _, err = b.configMap() assert.NoError(err) - second := b.deployment(digest) + second := b.deployment(annotate(digest)) report := helper.NewChangeReport("") assert.False(helper.DeploymentChanged(first, second, constants.FLPName, helper.HPADisabled(&cfg.Processor.KafkaConsumerAutoscaler), *cfg.Processor.KafkaConsumerReplicas, &report)) @@ -323,17 +340,17 @@ func TestDeploymentChanged(t *testing.T) { // Get first ns := "namespace" cfg := getConfig() - b := newTransfoBuilder(ns, image, &cfg, true) + b := transfBuilder(ns, &cfg) _, digest, _, err := b.configMap() assert.NoError(err) - first := b.deployment(digest) + first := b.deployment(annotate(digest)) // Check probes enabled change 
cfg.Processor.EnableKubeProbes = pointer.Bool(true) - b = newTransfoBuilder(ns, image, &cfg, true) + b = transfBuilder(ns, &cfg) _, digest, _, err = b.configMap() assert.NoError(err) - second := b.deployment(digest) + second := b.deployment(annotate(digest)) report := helper.NewChangeReport("") checkChanged := func(old, new *appsv1.Deployment, spec flowslatest.FlowCollectorSpec) bool { @@ -345,10 +362,10 @@ func TestDeploymentChanged(t *testing.T) { // Check log level change cfg.Processor.LogLevel = "info" - b = newTransfoBuilder(ns, image, &cfg, true) + b = transfBuilder(ns, &cfg) _, digest, _, err = b.configMap() assert.NoError(err) - third := b.deployment(digest) + third := b.deployment(annotate(digest)) report = helper.NewChangeReport("") assert.True(checkChanged(second, third, cfg)) @@ -359,10 +376,10 @@ func TestDeploymentChanged(t *testing.T) { corev1.ResourceCPU: resource.MustParse("500m"), corev1.ResourceMemory: resource.MustParse("500Gi"), } - b = newTransfoBuilder(ns, image, &cfg, true) + b = transfBuilder(ns, &cfg) _, digest, _, err = b.configMap() assert.NoError(err) - fourth := b.deployment(digest) + fourth := b.deployment(annotate(digest)) report = helper.NewChangeReport("") assert.True(checkChanged(third, fourth, cfg)) @@ -373,10 +390,10 @@ func TestDeploymentChanged(t *testing.T) { corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("512Mi"), } - b = newTransfoBuilder(ns, image, &cfg, true) + b = transfBuilder(ns, &cfg) _, digest, _, err = b.configMap() assert.NoError(err) - fifth := b.deployment(digest) + fifth := b.deployment(annotate(digest)) report = helper.NewChangeReport("") assert.True(checkChanged(fourth, fifth, cfg)) @@ -388,10 +405,10 @@ func TestDeploymentChanged(t *testing.T) { // Check replicas didn't change because HPA is used cfg2 := cfg cfg2.Processor.KafkaConsumerReplicas = pointer.Int32(5) - b = newTransfoBuilder(ns, image, &cfg2, true) + b = transfBuilder(ns, &cfg2) _, digest, _, err = 
b.configMap() assert.NoError(err) - sixth := b.deployment(digest) + sixth := b.deployment(annotate(digest)) report = helper.NewChangeReport("") assert.False(checkChanged(fifth, sixth, cfg2)) @@ -404,18 +421,18 @@ func TestDeploymentChangedReplicasNoHPA(t *testing.T) { // Get first ns := "namespace" cfg := getConfigNoHPA() - b := newTransfoBuilder(ns, image, &cfg, true) + b := transfBuilder(ns, &cfg) _, digest, _, err := b.configMap() assert.NoError(err) - first := b.deployment(digest) + first := b.deployment(annotate(digest)) // Check replicas changed (need to copy flp, as Spec.Replicas stores a pointer) cfg2 := cfg cfg2.Processor.KafkaConsumerReplicas = pointer.Int32(5) - b = newTransfoBuilder(ns, image, &cfg2, true) + b = transfBuilder(ns, &cfg2) _, digest, _, err = b.configMap() assert.NoError(err) - second := b.deployment(digest) + second := b.deployment(annotate(digest)) report := helper.NewChangeReport("") assert.True(helper.DeploymentChanged(first, second, constants.FLPName, helper.HPADisabled(&cfg2.Processor.KafkaConsumerAutoscaler), *cfg2.Processor.KafkaConsumerReplicas, &report)) @@ -428,7 +445,7 @@ func TestServiceNoChange(t *testing.T) { // Get first ns := "namespace" cfg := getConfig() - b := newMonolithBuilder(ns, image, &cfg, true) + b := monoBuilder(ns, &cfg) first := b.promService() // Check no change @@ -445,12 +462,12 @@ func TestServiceChanged(t *testing.T) { // Get first ns := "namespace" cfg := getConfig() - b := newMonolithBuilder(ns, image, &cfg, true) + b := monoBuilder(ns, &cfg) first := b.promService() // Check port changed cfg.Processor.Metrics.Server.Port = 9999 - b = newMonolithBuilder(ns, image, &cfg, true) + b = monoBuilder(ns, &cfg) second := b.promService() report := helper.NewChangeReport("") @@ -459,7 +476,7 @@ func TestServiceChanged(t *testing.T) { // Make sure non-service settings doesn't trigger service update cfg.Processor.LogLevel = "error" - b = newMonolithBuilder(ns, image, &cfg, true) + b = monoBuilder(ns, &cfg) third := 
b.promService() report = helper.NewChangeReport("") @@ -473,7 +490,7 @@ func TestServiceMonitorNoChange(t *testing.T) { // Get first ns := "namespace" cfg := getConfig() - b := newMonolithBuilder(ns, image, &cfg, true) + b := monoBuilder(ns, &cfg) first := b.generic.serviceMonitor() // Check no change @@ -490,11 +507,11 @@ func TestServiceMonitorChanged(t *testing.T) { // Get first ns := "namespace" cfg := getConfig() - b := newMonolithBuilder(ns, image, &cfg, true) + b := monoBuilder(ns, &cfg) first := b.generic.serviceMonitor() // Check namespace change - b = newMonolithBuilder("namespace2", image, &cfg, true) + b = monoBuilder("namespace2", &cfg) second := b.generic.serviceMonitor() report := helper.NewChangeReport("") @@ -502,7 +519,8 @@ func TestServiceMonitorChanged(t *testing.T) { assert.Contains(report.String(), "ServiceMonitor spec changed") // Check labels change - b = newMonolithBuilder("namespace2", image2, &cfg, true) + info := reconcilers.Common{Namespace: "namespace2"} + b = newMonolithBuilder(info.NewInstance(image2), &cfg) third := b.generic.serviceMonitor() report = helper.NewChangeReport("") @@ -516,7 +534,7 @@ func TestPrometheusRuleNoChange(t *testing.T) { // Get first ns := "namespace" cfg := getConfig() - b := newMonolithBuilder(ns, image, &cfg, true) + b := monoBuilder(ns, &cfg) first := b.generic.prometheusRule() // Check no change @@ -532,12 +550,12 @@ func TestPrometheusRuleChanged(t *testing.T) { // Get first cfg := getConfig() - b := newMonolithBuilder("namespace", image, &cfg, true) + b := monoBuilder("namespace", &cfg) first := b.generic.prometheusRule() // Check enabled rule change cfg.Processor.Metrics.DisableAlerts = []flowslatest.FLPAlert{flowslatest.AlertNoFlows} - b = newMonolithBuilder("namespace", image, &cfg, true) + b = monoBuilder("namespace", &cfg) second := b.generic.prometheusRule() report := helper.NewChangeReport("") @@ -545,7 +563,8 @@ func TestPrometheusRuleChanged(t *testing.T) { assert.Contains(report.String(), 
"PrometheusRule spec changed") // Check labels change - b = newMonolithBuilder("namespace2", image2, &cfg, true) + info := reconcilers.Common{Namespace: "namespace2"} + b = newMonolithBuilder(info.NewInstance(image2), &cfg) third := b.generic.prometheusRule() report = helper.NewChangeReport("") @@ -559,7 +578,7 @@ func TestConfigMapShouldDeserializeAsJSON(t *testing.T) { ns := "namespace" cfg := getConfig() loki := cfg.Loki - b := newMonolithBuilder(ns, image, &cfg, true) + b := monoBuilder(ns, &cfg) cm, digest, _, err := b.configMap() assert.NoError(err) assert.NotEmpty(t, digest) @@ -627,26 +646,27 @@ func TestLabels(t *testing.T) { assert := assert.New(t) cfg := getConfig() - builder := newMonolithBuilder("ns", image, &cfg, true) - tBuilder := newTransfoBuilder("ns", image, &cfg, true) - iBuilder := newIngestBuilder("ns", image, &cfg, true) + info := reconcilers.Common{Namespace: "ns"} + builder := newMonolithBuilder(info.NewInstance(image), &cfg) + tBuilder := newTransfoBuilder(info.NewInstance(image), &cfg) + iBuilder := newIngestBuilder(info.NewInstance(image), &cfg) // Deployment - depl := tBuilder.deployment("digest") + depl := tBuilder.deployment(annotate("digest")) assert.Equal("flowlogs-pipeline-transformer", depl.Labels["app"]) assert.Equal("flowlogs-pipeline-transformer", depl.Spec.Template.Labels["app"]) assert.Equal("dev", depl.Labels["version"]) assert.Equal("dev", depl.Spec.Template.Labels["version"]) // DaemonSet - ds := builder.daemonSet("digest") + ds := builder.daemonSet(annotate("digest")) assert.Equal("flowlogs-pipeline", ds.Labels["app"]) assert.Equal("flowlogs-pipeline", ds.Spec.Template.Labels["app"]) assert.Equal("dev", ds.Labels["version"]) assert.Equal("dev", ds.Spec.Template.Labels["version"]) // DaemonSet (ingester) - ds2 := iBuilder.daemonSet("digest") + ds2 := iBuilder.daemonSet(annotate("digest")) assert.Equal("flowlogs-pipeline-ingester", ds2.Labels["app"]) assert.Equal("flowlogs-pipeline-ingester", 
ds2.Spec.Template.Labels["app"]) assert.Equal("dev", ds2.Labels["version"]) @@ -698,7 +718,7 @@ func TestPipelineConfig(t *testing.T) { ns := "namespace" cfg := getConfig() cfg.Processor.LogLevel = "info" - b := newMonolithBuilder(ns, image, &cfg, true) + b := monoBuilder(ns, &cfg) stages, parameters, _, err := b.buildPipelineConfig() assert.NoError(err) assert.True(validatePipelineConfig(stages, parameters)) @@ -707,7 +727,8 @@ func TestPipelineConfig(t *testing.T) { // Kafka Ingester cfg.DeploymentModel = flowslatest.DeploymentModelKafka - bi := newIngestBuilder(ns, image, &cfg, true) + info := reconcilers.Common{Namespace: ns} + bi := newIngestBuilder(info.NewInstance(image), &cfg) stages, parameters, err = bi.buildPipelineConfig() assert.NoError(err) assert.True(validatePipelineConfig(stages, parameters)) @@ -715,7 +736,7 @@ func TestPipelineConfig(t *testing.T) { assert.Equal(`[{"name":"ipfix"},{"name":"kafka-write","follows":"ipfix"}]`, string(jsonStages)) // Kafka Transformer - bt := newTransfoBuilder(ns, image, &cfg, true) + bt := transfBuilder(ns, &cfg) stages, parameters, _, err = bt.buildPipelineConfig() assert.NoError(err) assert.True(validatePipelineConfig(stages, parameters)) @@ -731,7 +752,7 @@ func TestPipelineConfigDropUnused(t *testing.T) { cfg := getConfig() cfg.Processor.LogLevel = "info" cfg.Processor.DropUnusedFields = pointer.Bool(true) - b := newMonolithBuilder(ns, image, &cfg, true) + b := monoBuilder(ns, &cfg) stages, parameters, _, err := b.buildPipelineConfig() assert.NoError(err) assert.True(validatePipelineConfig(stages, parameters)) @@ -749,7 +770,7 @@ func TestPipelineTraceStage(t *testing.T) { cfg := getConfig() - b := newMonolithBuilder("namespace", image, &cfg, true) + b := monoBuilder("namespace", &cfg) stages, parameters, _, err := b.buildPipelineConfig() assert.NoError(err) assert.True(validatePipelineConfig(stages, parameters)) @@ -762,7 +783,7 @@ func TestMergeMetricsConfigurationNoIgnore(t *testing.T) { cfg := getConfig() - 
b := newMonolithBuilder("namespace", image, &cfg, true) + b := monoBuilder("namespace", &cfg) stages, parameters, cm, err := b.buildPipelineConfig() assert.NoError(err) assert.NotNil(cm) @@ -791,7 +812,7 @@ func TestMergeMetricsConfigurationWithIgnore(t *testing.T) { cfg := getConfig() cfg.Processor.Metrics.IgnoreTags = []string{"nodes"} - b := newMonolithBuilder("namespace", image, &cfg, true) + b := monoBuilder("namespace", &cfg) stages, parameters, cm, err := b.buildPipelineConfig() assert.NoError(err) assert.NotNil(cm) @@ -814,7 +835,7 @@ func TestMergeMetricsConfigurationIgnoreAll(t *testing.T) { cfg := getConfig() cfg.Processor.Metrics.IgnoreTags = []string{"nodes", "namespaces", "workloads"} - b := newMonolithBuilder("namespace", image, &cfg, true) + b := monoBuilder("namespace", &cfg) stages, parameters, cm, err := b.buildPipelineConfig() assert.NoError(err) assert.Nil(cm) @@ -842,7 +863,7 @@ func TestPipelineWithExporter(t *testing.T) { }, }) - b := newMonolithBuilder("namespace", image, &cfg, true) + b := monoBuilder("namespace", &cfg) stages, parameters, _, err := b.buildPipelineConfig() assert.NoError(err) assert.True(validatePipelineConfig(stages, parameters)) diff --git a/controllers/flowlogspipeline/flp_transfo_objects.go b/controllers/flowlogspipeline/flp_transfo_objects.go index 2139ae7d2..d9ae34735 100644 --- a/controllers/flowlogspipeline/flp_transfo_objects.go +++ b/controllers/flowlogspipeline/flp_transfo_objects.go @@ -10,6 +10,7 @@ import ( "github.com/netobserv/flowlogs-pipeline/pkg/api" "github.com/netobserv/flowlogs-pipeline/pkg/config" flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) @@ -17,19 +18,19 @@ type transfoBuilder struct { generic builder } -func newTransfoBuilder(ns, image string, desired *flowslatest.FlowCollectorSpec, useOpenShiftSCC bool) transfoBuilder 
{ - gen := newBuilder(ns, image, desired, ConfKafkaTransformer, useOpenShiftSCC) +func newTransfoBuilder(info *reconcilers.Instance, desired *flowslatest.FlowCollectorSpec) transfoBuilder { + gen := newBuilder(info, desired, ConfKafkaTransformer) return transfoBuilder{ generic: gen, } } -func (b *transfoBuilder) deployment(configDigest string) *appsv1.Deployment { - pod := b.generic.podTemplate(false /*no listen*/, true /*loki itf*/, false /*no host network*/, configDigest) +func (b *transfoBuilder) deployment(annotations map[string]string) *appsv1.Deployment { + pod := b.generic.podTemplate(false /*no listen*/, true /*loki itf*/, false /*no host network*/, annotations) return &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: b.generic.name(), - Namespace: b.generic.namespace, + Namespace: b.generic.info.Namespace, Labels: b.generic.labels, }, Spec: appsv1.DeploymentSpec{ @@ -64,7 +65,7 @@ func (b *transfoBuilder) buildPipelineConfig() ([]config.Stage, []config.StagePa Topic: b.generic.desired.Kafka.Topic, GroupId: b.generic.name(), // Without groupid, each message is delivered to each consumers Decoder: decoder, - TLS: getKafkaTLS(&b.generic.desired.Kafka.TLS), + TLS: b.generic.getKafkaTLS(&b.generic.desired.Kafka.TLS, "kafka-cert"), PullQueueCapacity: b.generic.desired.Processor.KafkaConsumerQueueCapacity, PullMaxBytes: b.generic.desired.Processor.KafkaConsumerBatchSize, }) @@ -84,7 +85,7 @@ func (b *transfoBuilder) autoScaler() *ascv2.HorizontalPodAutoscaler { return &ascv2.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: b.generic.name(), - Namespace: b.generic.namespace, + Namespace: b.generic.info.Namespace, Labels: b.generic.labels, }, Spec: ascv2.HorizontalPodAutoscalerSpec{ diff --git a/controllers/flowlogspipeline/flp_transfo_reconciler.go b/controllers/flowlogspipeline/flp_transfo_reconciler.go index 61bccff18..6d8e812e4 100644 --- a/controllers/flowlogspipeline/flp_transfo_reconciler.go +++ 
b/controllers/flowlogspipeline/flp_transfo_reconciler.go @@ -20,7 +20,7 @@ import ( // flpTransformerReconciler reconciles the current flowlogs-pipeline-transformer state with the desired configuration type flpTransformerReconciler struct { singleReconciler - reconcilersCommonInfo + *reconcilers.Instance owned transfoOwnedObjects } @@ -35,7 +35,7 @@ type transfoOwnedObjects struct { prometheusRule *monitoringv1.PrometheusRule } -func newTransformerReconciler(info *reconcilersCommonInfo) *flpTransformerReconciler { +func newTransformerReconciler(cmn *reconcilers.Instance) *flpTransformerReconciler { name := name(ConfKafkaTransformer) owned := transfoOwnedObjects{ deployment: &appsv1.Deployment{}, @@ -47,22 +47,22 @@ func newTransformerReconciler(info *reconcilersCommonInfo) *flpTransformerReconc serviceMonitor: &monitoringv1.ServiceMonitor{}, prometheusRule: &monitoringv1.PrometheusRule{}, } - info.nobjMngr.AddManagedObject(name, owned.deployment) - info.nobjMngr.AddManagedObject(name, owned.hpa) - info.nobjMngr.AddManagedObject(name, owned.serviceAccount) - info.nobjMngr.AddManagedObject(promServiceName(ConfKafkaTransformer), owned.promService) - info.nobjMngr.AddManagedObject(RoleBindingName(ConfKafkaTransformer), owned.roleBinding) - info.nobjMngr.AddManagedObject(configMapName(ConfKafkaTransformer), owned.configMap) - if info.availableAPIs.HasSvcMonitor() { - info.nobjMngr.AddManagedObject(serviceMonitorName(ConfKafkaTransformer), owned.serviceMonitor) + cmn.Managed.AddManagedObject(name, owned.deployment) + cmn.Managed.AddManagedObject(name, owned.hpa) + cmn.Managed.AddManagedObject(name, owned.serviceAccount) + cmn.Managed.AddManagedObject(promServiceName(ConfKafkaTransformer), owned.promService) + cmn.Managed.AddManagedObject(RoleBindingName(ConfKafkaTransformer), owned.roleBinding) + cmn.Managed.AddManagedObject(configMapName(ConfKafkaTransformer), owned.configMap) + if cmn.AvailableAPIs.HasSvcMonitor() { + 
cmn.Managed.AddManagedObject(serviceMonitorName(ConfKafkaTransformer), owned.serviceMonitor) } - if info.availableAPIs.HasPromRule() { - info.nobjMngr.AddManagedObject(prometheusRuleName(ConfKafkaTransformer), owned.prometheusRule) + if cmn.AvailableAPIs.HasPromRule() { + cmn.Managed.AddManagedObject(prometheusRuleName(ConfKafkaTransformer), owned.prometheusRule) } return &flpTransformerReconciler{ - reconcilersCommonInfo: *info, - owned: owned, + Instance: cmn, + owned: owned, } } @@ -73,28 +73,31 @@ func (r *flpTransformerReconciler) context(ctx context.Context) context.Context // cleanupNamespace cleans up old namespace func (r *flpTransformerReconciler) cleanupNamespace(ctx context.Context) { - r.nobjMngr.CleanupPreviousNamespace(ctx) + r.Managed.CleanupPreviousNamespace(ctx) } func (r *flpTransformerReconciler) reconcile(ctx context.Context, desired *flowslatest.FlowCollector) error { // Retrieve current owned objects - err := r.nobjMngr.FetchAll(ctx) + err := r.Managed.FetchAll(ctx) if err != nil { return err } // Transformer only used with Kafka if !helper.UseKafka(&desired.Spec) { - r.nobjMngr.TryDeleteAll(ctx) + r.Managed.TryDeleteAll(ctx) return nil } - builder := newTransfoBuilder(r.nobjMngr.Namespace, r.image, &desired.Spec, r.useOpenShiftSCC) + builder := newTransfoBuilder(r.Instance, &desired.Spec) newCM, configDigest, dbConfigMap, err := builder.configMap() if err != nil { return err } - if !r.nobjMngr.Exists(r.owned.configMap) { + annotations := map[string]string{ + constants.PodConfigurationDigest: configDigest, + } + if !r.Managed.Exists(r.owned.configMap) { if err := r.CreateOwned(ctx, newCM); err != nil { return err } @@ -103,8 +106,8 @@ func (r *flpTransformerReconciler) reconcile(ctx context.Context, desired *flows return err } } - if r.reconcilersCommonInfo.availableAPIs.HasConsoleConfig() { - if err := r.reconcileDashboardConfig(ctx, dbConfigMap); err != nil { + if r.AvailableAPIs.HasConsoleConfig() { + if err := 
reconcileDashboardConfig(ctx, &r.Client, dbConfigMap); err != nil { return err } } @@ -117,16 +120,31 @@ func (r *flpTransformerReconciler) reconcile(ctx context.Context, desired *flows return err } - return r.reconcileDeployment(ctx, &desired.Spec.Processor, &builder, configDigest) + // Watch for Loki certificate if necessary; we'll ignore in that case the returned digest, as we don't need to restart pods on cert rotation + // because certificate is always reloaded from file + if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.TLS, r.Namespace); err != nil { + return err + } + + // Watch for Kafka certificate if necessary; need to restart pods in case of cert rotation + if err = annotateKafkaCerts(ctx, r.Common, &desired.Spec.Kafka, "kafka", annotations); err != nil { + return err + } + // Same for Kafka exporters + if err = annotateKafkaExporterCerts(ctx, r.Common, desired.Spec.Exporters, annotations); err != nil { + return err + } + + return r.reconcileDeployment(ctx, &desired.Spec.Processor, &builder, annotations) } -func (r *flpTransformerReconciler) reconcileDeployment(ctx context.Context, desiredFLP *flpSpec, builder *transfoBuilder, configDigest string) error { +func (r *flpTransformerReconciler) reconcileDeployment(ctx context.Context, desiredFLP *flpSpec, builder *transfoBuilder, annotations map[string]string) error { report := helper.NewChangeReport("FLP Deployment") defer report.LogIfNeeded(ctx) - new := builder.deployment(configDigest) + new := builder.deployment(annotations) - if !r.nobjMngr.Exists(r.owned.deployment) { + if !r.Managed.Exists(r.owned.deployment) { if err := r.CreateOwned(ctx, new); err != nil { return err } @@ -141,10 +159,10 @@ func (r *flpTransformerReconciler) reconcileDeployment(ctx context.Context, desi // Delete or Create / Update Autoscaler according to HPA option if helper.HPADisabled(&desiredFLP.KafkaConsumerAutoscaler) { - r.nobjMngr.TryDelete(ctx, r.owned.hpa) + r.Managed.TryDelete(ctx, r.owned.hpa) } 
else { newASC := builder.autoScaler() - if !r.nobjMngr.Exists(r.owned.hpa) { + if !r.Managed.Exists(r.owned.hpa) { if err := r.CreateOwned(ctx, newASC); err != nil { return err } @@ -161,18 +179,18 @@ func (r *flpTransformerReconciler) reconcilePrometheusService(ctx context.Contex report := helper.NewChangeReport("FLP prometheus service") defer report.LogIfNeeded(ctx) - if err := reconcilers.ReconcileService(ctx, r.nobjMngr, &r.ClientHelper, r.owned.promService, builder.promService(), &report); err != nil { + if err := r.ReconcileService(ctx, r.owned.promService, builder.promService(), &report); err != nil { return err } - if r.availableAPIs.HasSvcMonitor() { + if r.AvailableAPIs.HasSvcMonitor() { serviceMonitor := builder.generic.serviceMonitor() - if err := reconcilers.GenericReconcile(ctx, r.nobjMngr, &r.ClientHelper, r.owned.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { + if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.owned.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { return err } } - if r.availableAPIs.HasPromRule() { + if r.AvailableAPIs.HasPromRule() { promRules := builder.generic.prometheusRule() - if err := reconcilers.GenericReconcile(ctx, r.nobjMngr, &r.ClientHelper, r.owned.prometheusRule, promRules, &report, helper.PrometheusRuleChanged); err != nil { + if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.owned.prometheusRule, promRules, &report, helper.PrometheusRuleChanged); err != nil { return err } } @@ -180,7 +198,7 @@ func (r *flpTransformerReconciler) reconcilePrometheusService(ctx context.Contex } func (r *flpTransformerReconciler) reconcilePermissions(ctx context.Context, builder *transfoBuilder) error { - if !r.nobjMngr.Exists(r.owned.serviceAccount) { + if !r.Managed.Exists(r.owned.serviceAccount) { return r.CreateOwned(ctx, builder.serviceAccount()) } // We only configure name, update is not needed for now diff --git 
a/controllers/ovs/flowsconfig_cno_reconciler.go b/controllers/ovs/flowsconfig_cno_reconciler.go index ff4ee944e..4423f8278 100644 --- a/controllers/ovs/flowsconfig_cno_reconciler.go +++ b/controllers/ovs/flowsconfig_cno_reconciler.go @@ -3,7 +3,6 @@ package ovs import ( "context" "fmt" - "net" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -17,22 +16,16 @@ import ( ) type FlowsConfigCNOController struct { - ovsConfigMapName string - collectorNamespace string - cnoNamespace string - client reconcilers.ClientHelper - lookupIP func(string) ([]net.IP, error) + *reconcilers.Common + ovsConfigMapName string + cnoNamespace string } -func NewFlowsConfigCNOController(client reconcilers.ClientHelper, - collectorNamespace, cnoNamespace, ovsConfigMapName string, - lookupIP func(string) ([]net.IP, error)) *FlowsConfigCNOController { +func NewFlowsConfigCNOController(common *reconcilers.Common, cnoNamespace, ovsConfigMapName string) *FlowsConfigCNOController { return &FlowsConfigCNOController{ - client: client, - collectorNamespace: collectorNamespace, - cnoNamespace: cnoNamespace, - ovsConfigMapName: ovsConfigMapName, - lookupIP: lookupIP, + Common: common, + cnoNamespace: cnoNamespace, + ovsConfigMapName: ovsConfigMapName, } } @@ -51,7 +44,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl } // If the user has changed the agent type, we need to manually undeploy the configmap if current != nil { - return c.client.Delete(ctx, &corev1.ConfigMap{ + return c.Delete(ctx, &corev1.ConfigMap{ ObjectMeta: v1.ObjectMeta{ Name: c.ovsConfigMapName, Namespace: c.cnoNamespace, @@ -70,7 +63,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl if err != nil { return err } - return c.client.Create(ctx, cm) + return c.Create(ctx, cm) } if desired != nil && *desired != *current { @@ -79,7 +72,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl if err != nil { return err } - 
return c.client.Update(ctx, cm) + return c.Update(ctx, cm) } rlog.Info("No changes needed") @@ -88,7 +81,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl func (c *FlowsConfigCNOController) current(ctx context.Context) (*flowsConfig, error) { curr := &corev1.ConfigMap{} - if err := c.client.Get(ctx, types.NamespacedName{ + if err := c.Get(ctx, types.NamespacedName{ Name: c.ovsConfigMapName, Namespace: c.cnoNamespace, }, curr); err != nil { @@ -133,7 +126,7 @@ func (c *FlowsConfigCNOController) flowsConfigMap(fc *flowsConfig) (*corev1.Conf }, Data: data, } - if err := c.client.SetControllerReference(cm); err != nil { + if err := c.SetControllerReference(cm); err != nil { return nil, err } return cm, nil diff --git a/controllers/ovs/flowsconfig_ovnk_reconciler.go b/controllers/ovs/flowsconfig_ovnk_reconciler.go index 5859b7006..f67ff67d0 100644 --- a/controllers/ovs/flowsconfig_ovnk_reconciler.go +++ b/controllers/ovs/flowsconfig_ovnk_reconciler.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "net" "strconv" "time" @@ -20,18 +19,14 @@ import ( ) type FlowsConfigOVNKController struct { - namespace string - config flowslatest.OVNKubernetesConfig - client reconcilers.ClientHelper - lookupIP func(string) ([]net.IP, error) + *reconcilers.Common + config flowslatest.OVNKubernetesConfig } -func NewFlowsConfigOVNKController(client reconcilers.ClientHelper, namespace string, config flowslatest.OVNKubernetesConfig, lookupIP func(string) ([]net.IP, error)) *FlowsConfigOVNKController { +func NewFlowsConfigOVNKController(common *reconcilers.Common, config flowslatest.OVNKubernetesConfig) *FlowsConfigOVNKController { return &FlowsConfigOVNKController{ - client: client, - namespace: namespace, - config: config, - lookupIP: lookupIP, + Common: common, + config: config, } } @@ -52,7 +47,7 @@ func (c *FlowsConfigOVNKController) updateEnv(ctx context.Context, target *flows rlog := log.FromContext(ctx, "component", "FlowsConfigOVNKController") 
ds := &appsv1.DaemonSet{} - if err := c.client.Get(ctx, types.NamespacedName{ + if err := c.Get(ctx, types.NamespacedName{ Name: c.config.DaemonSetName, Namespace: c.config.Namespace, }, ds); err != nil { @@ -77,7 +72,7 @@ func (c *FlowsConfigOVNKController) updateEnv(ctx context.Context, target *flows } if anyUpdate { rlog.Info("Provided IPFIX configuration differs current configuration. Updating") - return c.client.Update(ctx, ds) + return c.Update(ctx, ds) } rlog.Info("No changes needed") diff --git a/controllers/reconcilers/client_helper.go b/controllers/reconcilers/common.go similarity index 60% rename from controllers/reconcilers/client_helper.go rename to controllers/reconcilers/common.go index 35cfdf0de..4c415da61 100644 --- a/controllers/reconcilers/client_helper.go +++ b/controllers/reconcilers/common.go @@ -5,8 +5,10 @@ import ( "fmt" "reflect" + "github.com/netobserv/network-observability-operator/controllers/constants" + "github.com/netobserv/network-observability-operator/pkg/discover" "github.com/netobserv/network-observability-operator/pkg/helper" - appsv1 "k8s.io/api/apps/v1" + "github.com/netobserv/network-observability-operator/pkg/watchers" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -15,76 +17,39 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" ) -// ClientHelper includes a kube client with some additional helper functions -type ClientHelper struct { - client.Client - SetControllerReference func(client.Object) error - changed bool - deplInProgress bool +type Common struct { + helper.Client + Watcher *watchers.Watcher + Namespace string + PreviousNamespace string + UseOpenShiftSCC bool + AvailableAPIs *discover.AvailableAPIs } -// CreateOwned is an helper function that creates an object, sets owner reference and writes info & errors logs -func (c *ClientHelper) CreateOwned(ctx context.Context, obj client.Object) error { - log := log.FromContext(ctx) - c.changed = true - err := 
c.SetControllerReference(obj) - if err != nil { - log.Error(err, "Failed to set controller reference") - return err - } - kind := reflect.TypeOf(obj).String() - log.Info("Creating a new "+kind, "Namespace", obj.GetNamespace(), "Name", obj.GetName()) - err = c.Create(ctx, obj) - if err != nil { - log.Error(err, "Failed to create new "+kind, "Namespace", obj.GetNamespace(), "Name", obj.GetName()) - return err - } - return nil -} - -// UpdateOwned is an helper function that updates an object, sets owner reference and writes info & errors logs -func (c *ClientHelper) UpdateOwned(ctx context.Context, old, obj client.Object) error { - log := log.FromContext(ctx) - c.changed = true - if old != nil { - obj.SetResourceVersion(old.GetResourceVersion()) - } - err := c.SetControllerReference(obj) - if err != nil { - log.Error(err, "Failed to set controller reference") - return err - } - kind := reflect.TypeOf(obj).String() - log.Info("Updating "+kind, "Namespace", obj.GetNamespace(), "Name", obj.GetName()) - err = c.Update(ctx, obj) - if err != nil { - log.Error(err, "Failed to update "+kind, "Namespace", obj.GetNamespace(), "Name", obj.GetName()) - return err - } - return nil +func (c *Common) PrivilegedNamespace() string { + return c.Namespace + constants.EBPFPrivilegedNSSuffix } -func (c *ClientHelper) DidChange() bool { - return c.changed +func (c *Common) PreviousPrivilegedNamespace() string { + return c.PreviousNamespace + constants.EBPFPrivilegedNSSuffix } -func (c *ClientHelper) IsInProgress() bool { - return c.deplInProgress -} - -func (c *ClientHelper) CheckDeploymentInProgress(d *appsv1.Deployment) { - if d.Status.AvailableReplicas < d.Status.Replicas { - c.deplInProgress = true - } +type Instance struct { + *Common + Managed *NamespacedObjectManager + Image string } -func (c *ClientHelper) CheckDaemonSetInProgress(ds *appsv1.DaemonSet) { - if ds.Status.NumberAvailable < ds.Status.DesiredNumberScheduled { - c.deplInProgress = true +func (c *Common) NewInstance(image 
string) *Instance { + managed := NewNamespacedObjectManager(c) + return &Instance{ + Common: c, + Managed: managed, + Image: image, } } -func (c *ClientHelper) ReconcileClusterRoleBinding(ctx context.Context, desired *rbacv1.ClusterRoleBinding) error { +func (c *Common) ReconcileClusterRoleBinding(ctx context.Context, desired *rbacv1.ClusterRoleBinding) error { actual := rbacv1.ClusterRoleBinding{} if err := c.Get(ctx, types.NamespacedName{Name: desired.ObjectMeta.Name}, &actual); err != nil { if errors.IsNotFound(err) { @@ -111,7 +76,7 @@ func (c *ClientHelper) ReconcileClusterRoleBinding(ctx context.Context, desired return c.UpdateOwned(ctx, &actual, desired) } -func (c *ClientHelper) ReconcileRoleBinding(ctx context.Context, desired *rbacv1.RoleBinding) error { +func (c *Common) ReconcileRoleBinding(ctx context.Context, desired *rbacv1.RoleBinding) error { actual := rbacv1.RoleBinding{} if err := c.Get(ctx, types.NamespacedName{Name: desired.ObjectMeta.Name}, &actual); err != nil { if errors.IsNotFound(err) { @@ -138,7 +103,7 @@ func (c *ClientHelper) ReconcileRoleBinding(ctx context.Context, desired *rbacv1 return c.UpdateOwned(ctx, &actual, desired) } -func (c *ClientHelper) ReconcileClusterRole(ctx context.Context, desired *rbacv1.ClusterRole) error { +func (c *Common) ReconcileClusterRole(ctx context.Context, desired *rbacv1.ClusterRole) error { actual := rbacv1.ClusterRole{} if err := c.Get(ctx, types.NamespacedName{Name: desired.Name}, &actual); err != nil { if errors.IsNotFound(err) { @@ -156,7 +121,7 @@ func (c *ClientHelper) ReconcileClusterRole(ctx context.Context, desired *rbacv1 return c.UpdateOwned(ctx, &actual, desired) } -func (c *ClientHelper) ReconcileRole(ctx context.Context, desired *rbacv1.Role) error { +func (c *Common) ReconcileRole(ctx context.Context, desired *rbacv1.Role) error { actual := rbacv1.Role{} if err := c.Get(ctx, types.NamespacedName{Name: desired.Name}, &actual); err != nil { if errors.IsNotFound(err) { @@ -174,7 +139,7 @@ 
func (c *ClientHelper) ReconcileRole(ctx context.Context, desired *rbacv1.Role) return c.UpdateOwned(ctx, &actual, desired) } -func (c *ClientHelper) ReconcileConfigMap(ctx context.Context, desired *corev1.ConfigMap) error { +func (c *Common) ReconcileConfigMap(ctx context.Context, desired *corev1.ConfigMap) error { actual := corev1.ConfigMap{} if err := c.Get(ctx, types.NamespacedName{Name: desired.Name, Namespace: desired.Namespace}, &actual); err != nil { if errors.IsNotFound(err) { @@ -191,3 +156,29 @@ func (c *ClientHelper) ReconcileConfigMap(ctx context.Context, desired *corev1.C return c.UpdateOwned(ctx, &actual, desired) } + +func (i *Instance) ReconcileService(ctx context.Context, old, new *corev1.Service, report *helper.ChangeReport) error { + if !i.Managed.Exists(old) { + if err := i.CreateOwned(ctx, new); err != nil { + return err + } + } else if helper.ServiceChanged(old, new, report) { + // In case we're updating an existing service, we need to build from the old one to keep immutable fields such as clusterIP + newSVC := old.DeepCopy() + newSVC.Spec.Ports = new.Spec.Ports + if err := i.UpdateOwned(ctx, old, newSVC); err != nil { + return err + } + } + return nil +} + +func GenericReconcile[K client.Object](ctx context.Context, m *NamespacedObjectManager, cl *helper.Client, old, new K, report *helper.ChangeReport, changeFunc func(old, new K, report *helper.ChangeReport) bool) error { + if !m.Exists(old) { + return cl.CreateOwned(ctx, new) + } + if changeFunc(old, new, report) { + return cl.UpdateOwned(ctx, old, new) + } + return nil +} diff --git a/controllers/reconcilers/namespaced_objects_manager.go b/controllers/reconcilers/namespaced_objects_manager.go index 57b75acc1..3cae5da47 100644 --- a/controllers/reconcilers/namespaced_objects_manager.go +++ b/controllers/reconcilers/namespaced_objects_manager.go @@ -5,13 +5,10 @@ import ( "reflect" "strings" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" 
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - - "github.com/netobserv/network-observability-operator/pkg/helper" ) // NamespacedObjectManager provides some helpers to manage (fetch, delete) namespace-scoped objects @@ -29,11 +26,11 @@ type managedObject struct { found bool } -func NewNamespacedObjectManager(cl client.Client, ns, prevNS string) *NamespacedObjectManager { +func NewNamespacedObjectManager(cmn *Common) *NamespacedObjectManager { return &NamespacedObjectManager{ - client: cl, - Namespace: ns, - PreviousNamespace: prevNS, + client: cmn.Client, + Namespace: cmn.Namespace, + PreviousNamespace: cmn.PreviousNamespace, } } @@ -126,29 +123,3 @@ func (m *NamespacedObjectManager) Exists(obj client.Object) bool { } return false } - -func GenericReconcile[K client.Object](ctx context.Context, m *NamespacedObjectManager, cl *ClientHelper, old, new K, report *helper.ChangeReport, changeFunc func(old, new K, report *helper.ChangeReport) bool) error { - if !m.Exists(old) { - return cl.CreateOwned(ctx, new) - } - if changeFunc(old, new, report) { - return cl.UpdateOwned(ctx, old, new) - } - return nil -} - -func ReconcileService(ctx context.Context, m *NamespacedObjectManager, cl *ClientHelper, old, new *corev1.Service, report *helper.ChangeReport) error { - if !m.Exists(old) { - if err := cl.CreateOwned(ctx, new); err != nil { - return err - } - } else if helper.ServiceChanged(old, new, report) { - // In case we're updating an existing service, we need to build from the old one to keep immutable fields such as clusterIP - newSVC := old.DeepCopy() - newSVC.Spec.Ports = new.Spec.Ports - if err := cl.UpdateOwned(ctx, old, newSVC); err != nil { - return err - } - } - return nil -} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index d6ac7d6ca..2786b986e 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -53,6 +53,8 @@ import ( const testCnoNamespace = "openshift-network-operator" +var 
namespacesToPrepare = []string{testCnoNamespace, "openshift-config-managed", "loki-namespace", "kafka-exporter-namespace", "main-namespace", "main-namespace-privileged"} + var ( ctx context.Context k8sManager manager.Manager @@ -75,6 +77,7 @@ var _ = Describe("FlowCollector Controller", Ordered, Serial, func() { flowCollectorEBPFSpecs() flowCollectorEBPFKafkaSpecs() flowCollectorIsoSpecs() + flowCollectorCertificatesSpecs() }) var _ = BeforeSuite(func() { @@ -157,17 +160,13 @@ var _ = AfterSuite(func() { }) func prepareNamespaces() error { - if err := k8sClient.Create(ctx, &corev1.Namespace{ - TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}, - ObjectMeta: metav1.ObjectMeta{Name: testCnoNamespace}, - }); err != nil { - return err - } - if err := k8sClient.Create(ctx, &corev1.Namespace{ - TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}, - ObjectMeta: metav1.ObjectMeta{Name: "openshift-config-managed"}, - }); err != nil { - return err + for _, ns := range namespacesToPrepare { + if err := k8sClient.Create(ctx, &corev1.Namespace{ + TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{Name: ns}, + }); err != nil { + return err + } } return nil } diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index 41557de79..f2c187b04 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -1830,6 +1830,15 @@ caCert defines the reference of the certificate for the Certificate Authority name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -1880,6 +1889,15 @@ userCert defines the user certificate reference, used for mTLS (you can ignore i name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -2027,6 +2045,15 @@ caCert defines the reference of the certificate for the Certificate Authority name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -2077,6 +2104,15 @@ userCert defines the user certificate reference, used for mTLS (you can ignore i name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -2315,6 +2351,15 @@ caCert defines the reference of the certificate for the Certificate Authority name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -2365,6 +2410,15 @@ userCert defines the user certificate reference, used for mTLS (you can ignore i name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -3626,6 +3680,15 @@ TLS configuration. name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -5653,6 +5716,15 @@ caCert defines the reference of the certificate for the Certificate Authority name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -5703,6 +5775,15 @@ userCert defines the user certificate reference, used for mTLS (you can ignore i name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -5850,6 +5931,15 @@ caCert defines the reference of the certificate for the Certificate Authority name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -5900,6 +5990,15 @@ userCert defines the user certificate reference, used for mTLS (you can ignore i name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -6145,6 +6244,15 @@ caCert defines the reference of the certificate for the Certificate Authority name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -6195,6 +6303,15 @@ userCert defines the user certificate reference, used for mTLS (you can ignore i name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -6297,6 +6414,15 @@ caCert defines the reference of the certificate for the Certificate Authority name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -6347,6 +6473,15 @@ userCert defines the user certificate reference, used for mTLS (you can ignore i name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum @@ -7643,6 +7778,15 @@ TLS configuration. name of the config map or secret containing certificates
false + + namespace + string + + namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+
+ Default:
+ + false type enum diff --git a/pkg/helper/certificates.go b/pkg/helper/certificates.go deleted file mode 100644 index 8efea2ea1..000000000 --- a/pkg/helper/certificates.go +++ /dev/null @@ -1,116 +0,0 @@ -package helper - -import ( - "fmt" - - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" - "github.com/netobserv/network-observability-operator/controllers/constants" - corev1 "k8s.io/api/core/v1" -) - -// AppendCertVolumes will add a volume + volume mount for a CA cert if defined, and another volume + volume mount for a user cert if defined. -// It does nothing if neither is defined. -func AppendCertVolumes(volumes []corev1.Volume, volumeMounts []corev1.VolumeMount, config *flowslatest.ClientTLS, name string) ([]corev1.Volume, []corev1.VolumeMount) { - volOut := volumes - vmOut := volumeMounts - if config.CACert.Name != "" { - vol, vm := buildVolume(config.CACert, constants.CertCAName(name)) - volOut = append(volOut, vol) - vmOut = append(vmOut, vm) - } - if config.UserCert.Name != "" { - vol, vm := buildVolume(config.UserCert, constants.CertUserName(name)) - volOut = append(volOut, vol) - vmOut = append(vmOut, vm) - } - return volOut, vmOut -} - -func AppendSingleCertVolumes(volumes []corev1.Volume, volumeMounts []corev1.VolumeMount, config *flowslatest.CertificateReference, name string) ([]corev1.Volume, []corev1.VolumeMount) { - volOut := volumes - vmOut := volumeMounts - if config.Name != "" { - vol, vm := buildVolume(*config, name) - volOut = append(volOut, vol) - vmOut = append(vmOut, vm) - } - return volOut, vmOut -} - -func buildVolume(ref flowslatest.CertificateReference, name string) (corev1.Volume, corev1.VolumeMount) { - var vol corev1.Volume - if ref.Type == flowslatest.CertRefTypeConfigMap { - vol = corev1.Volume{ - Name: name, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: ref.Name, - }, - }, - }, - } - } else { - vol = 
corev1.Volume{ - Name: name, - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: ref.Name, - }, - }, - } - } - return vol, corev1.VolumeMount{ - Name: name, - ReadOnly: true, - MountPath: "/var/" + name, - } -} - -func getPath(base, suffix, file string) string { - if len(suffix) > 0 { - return fmt.Sprintf("/var/%s-%s/%s", base, suffix, file) - } - return fmt.Sprintf("/var/%s/%s", base, file) -} - -// GetCACertPath returns the CA cert path that corresponds to a volume/volume mount created with "AppendCertVolumes" -// When not available, an empty string is returned. -func GetCACertPath(config *flowslatest.ClientTLS, name string) string { - if config.CACert.Name != "" { - return getPath(name, constants.CertCASuffix, config.CACert.CertFile) - } - return "" -} - -// GetUserCertPath returns the user cert path that corresponds to a volume/volume mount created with "AppendCertVolumes" -// When not available, an empty string is returned. -func GetUserCertPath(config *flowslatest.ClientTLS, name string) string { - if config.UserCert.Name != "" { - return getPath(name, constants.CertUserSuffix, config.UserCert.CertFile) - } - return "" -} - -// GetUserKeyPath returns the user private key path that corresponds to a volume/volume mount created with "AppendCertVolumes" -// When not available, an empty string is returned. 
-func GetUserKeyPath(config *flowslatest.ClientTLS, name string) string { - if config.UserCert.Name != "" { - return getPath(name, constants.CertUserSuffix, config.UserCert.CertKey) - } - return "" -} - -func GetSingleCertPath(config *flowslatest.CertificateReference, name string) string { - if config.Name != "" { - return getPath(name, "", config.CertFile) - } - return "" -} - -func GetSingleKeyPath(config *flowslatest.CertificateReference, name string) string { - if config.Name != "" { - return getPath(name, "", config.CertKey) - } - return "" -} diff --git a/pkg/helper/client_helper.go b/pkg/helper/client_helper.go new file mode 100644 index 000000000..1230d3558 --- /dev/null +++ b/pkg/helper/client_helper.go @@ -0,0 +1,80 @@ +package helper + +import ( + "context" + "reflect" + + appsv1 "k8s.io/api/apps/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// Client includes a kube client with some additional helper functions +type Client struct { + client.Client + SetControllerReference func(client.Object) error + SetChanged func(bool) + SetInProgress func(bool) +} + +func UnmanagedClient(cl client.Client) Client { + return Client{ + Client: cl, + SetControllerReference: func(o client.Object) error { return nil }, + SetChanged: func(b bool) {}, + SetInProgress: func(b bool) {}, + } +} + +// CreateOwned is an helper function that creates an object, sets owner reference and writes info & errors logs +func (c *Client) CreateOwned(ctx context.Context, obj client.Object) error { + log := log.FromContext(ctx) + c.SetChanged(true) + err := c.SetControllerReference(obj) + if err != nil { + log.Error(err, "Failed to set controller reference") + return err + } + kind := reflect.TypeOf(obj).String() + log.Info("Creating a new "+kind, "Namespace", obj.GetNamespace(), "Name", obj.GetName()) + err = c.Create(ctx, obj) + if err != nil { + log.Error(err, "Failed to create new "+kind, "Namespace", obj.GetNamespace(), "Name", 
obj.GetName()) + return err + } + return nil +} + +// UpdateOwned is an helper function that updates an object, sets owner reference and writes info & errors logs +func (c *Client) UpdateOwned(ctx context.Context, old, obj client.Object) error { + log := log.FromContext(ctx) + c.SetChanged(true) + if old != nil { + obj.SetResourceVersion(old.GetResourceVersion()) + } + err := c.SetControllerReference(obj) + if err != nil { + log.Error(err, "Failed to set controller reference") + return err + } + kind := reflect.TypeOf(obj).String() + log.Info("Updating "+kind, "Namespace", obj.GetNamespace(), "Name", obj.GetName()) + err = c.Update(ctx, obj) + if err != nil { + log.Error(err, "Failed to update "+kind, "Namespace", obj.GetNamespace(), "Name", obj.GetName()) + return err + } + return nil +} + +func (c *Client) CheckDeploymentInProgress(d *appsv1.Deployment) { + if d.Status.AvailableReplicas < d.Status.Replicas { + c.SetInProgress(true) + } +} + +func (c *Client) CheckDaemonSetInProgress(ds *appsv1.DaemonSet) { + if ds.Status.NumberAvailable < ds.Status.DesiredNumberScheduled { + c.SetInProgress(true) + } +} diff --git a/pkg/helper/flowcollector.go b/pkg/helper/flowcollector.go index b01b251c5..b34c402ab 100644 --- a/pkg/helper/flowcollector.go +++ b/pkg/helper/flowcollector.go @@ -21,6 +21,14 @@ func UseIPFIX(spec *flowslatest.FlowCollectorSpec) bool { func UseKafka(spec *flowslatest.FlowCollectorSpec) bool { return spec.DeploymentModel == flowslatest.DeploymentModelKafka } +func HasKafkaExporter(spec *flowslatest.FlowCollectorSpec) bool { + for _, ex := range spec.Exporters { + if ex.Type == flowslatest.KafkaExporter { + return true + } + } + return false +} func HPADisabled(spec *flowslatest.FlowCollectorHPA) bool { return spec.Status == flowslatest.HPAStatusDisabled diff --git a/pkg/helper/tokens.go b/pkg/helper/tokens.go deleted file mode 100644 index ab594658d..000000000 --- a/pkg/helper/tokens.go +++ /dev/null @@ -1,33 +0,0 @@ -package helper - -import ( - 
corev1 "k8s.io/api/core/v1" -) - -const TokensPath = "/var/run/secrets/tokens/" - -// AppendTokenVolume will add a volume + volume mount for a service account token if defined -func AppendTokenVolume(volumes []corev1.Volume, volumeMounts []corev1.VolumeMount, name string, fileName string) ([]corev1.Volume, []corev1.VolumeMount) { - volOut := append(volumes, - corev1.Volume{ - Name: name, - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - Sources: []corev1.VolumeProjection{ - { - ServiceAccountToken: &corev1.ServiceAccountTokenProjection{ - Path: fileName, - }, - }, - }, - }, - }, - }) - vmOut := append(volumeMounts, - corev1.VolumeMount{ - MountPath: TokensPath, - Name: name, - }, - ) - return volOut, vmOut -} diff --git a/pkg/test/kube_mock.go b/pkg/test/kube_mock.go new file mode 100644 index 000000000..82cc0f58c --- /dev/null +++ b/pkg/test/kube_mock.go @@ -0,0 +1,87 @@ +package test + +import ( + "context" + "errors" + + "github.com/stretchr/testify/mock" + v1 "k8s.io/api/core/v1" + kerr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ClientMock struct { + mock.Mock + client.Client + objs map[string]client.Object +} + +func key(obj client.Object) string { + return obj.GetObjectKind().GroupVersionKind().Kind + "/" + obj.GetNamespace() + "/" + obj.GetName() +} + +func (o *ClientMock) Get(ctx context.Context, nsname types.NamespacedName, obj client.Object) error { + args := o.Called(ctx, nsname, obj) + return args.Error(0) +} + +func (o *ClientMock) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + k := key(obj) + if _, exists := o.objs[k]; exists { + return errors.New("already exists") + } + o.objs[k] = obj + args := o.Called(ctx, obj, opts) + return args.Error(0) +} + +func (o *ClientMock) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + 
k := key(obj) + if _, exists := o.objs[k]; !exists { + return errors.New("doesn't exist") + } + o.objs[k] = obj + args := o.Called(ctx, obj, opts) + return args.Error(0) +} + +func (o *ClientMock) MockSecret(obj *v1.Secret) { + if o.objs == nil { + o.objs = map[string]client.Object{} + } + o.objs[key(obj)] = obj + o.On("Get", mock.Anything, types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}, mock.Anything).Run(func(args mock.Arguments) { + arg := args.Get(2).(*v1.Secret) + arg.SetName(obj.GetName()) + arg.SetNamespace(obj.GetNamespace()) + arg.Data = obj.Data + }).Return(nil) +} + +func (o *ClientMock) MockConfigMap(obj *v1.ConfigMap) { + if o.objs == nil { + o.objs = map[string]client.Object{} + } + o.objs[key(obj)] = obj + o.On("Get", mock.Anything, types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}, mock.Anything).Run(func(args mock.Arguments) { + arg := args.Get(2).(*v1.ConfigMap) + arg.SetName(obj.GetName()) + arg.SetNamespace(obj.GetNamespace()) + arg.Data = obj.Data + }).Return(nil) +} + +func (o *ClientMock) UpdateObject(obj client.Object) { + o.objs[key(obj)] = obj +} + +func (o *ClientMock) MockNonExisting(nsn types.NamespacedName) { + o.On("Get", mock.Anything, nsn, mock.Anything).Return(kerr.NewNotFound(schema.GroupResource{}, "")) +} + +func (o *ClientMock) MockCreateUpdate() { + o.On("Create", mock.Anything, mock.Anything, mock.Anything).Return(nil) + o.On("Update", mock.Anything, mock.Anything, mock.Anything).Return(nil) +} diff --git a/pkg/volumes/builder.go b/pkg/volumes/builder.go new file mode 100644 index 000000000..99fb60068 --- /dev/null +++ b/pkg/volumes/builder.go @@ -0,0 +1,131 @@ +package volumes + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + "github.com/netobserv/network-observability-operator/controllers/constants" + "github.com/netobserv/network-observability-operator/pkg/watchers" +) + +type VolumeInfo 
struct { + Volume corev1.Volume + Mount corev1.VolumeMount +} + +type Builder struct { + info []VolumeInfo +} + +func (b *Builder) AddMutualTLSCertificates(config *flowslatest.ClientTLS, namePrefix string) (caPath, userCertPath, userKeyPath string) { + if config.CACert.Name != "" { + caPath, _ = b.AddCertificate(&config.CACert, namePrefix+"-ca") + } + if config.UserCert.Name != "" { + userCertPath, userKeyPath = b.AddCertificate(&config.UserCert, namePrefix+"-user") + } + return +} + +func (b *Builder) AddCACertificate(config *flowslatest.ClientTLS, namePrefix string) (caPath string) { + if config.CACert.Name != "" { + caPath, _ = b.AddCertificate(&config.CACert, namePrefix+"-ca") + } + return +} + +func (b *Builder) AddCertificate(ref *flowslatest.CertificateReference, volumeName string) (certPath, keyPath string) { + if ref.Name != "" { + certPath = fmt.Sprintf("/var/%s/%s", volumeName, ref.CertFile) + keyPath = fmt.Sprintf("/var/%s/%s", volumeName, ref.CertKey) + vol, vm := buildVolumeAndMount(ref.Type, ref.Name, volumeName) + b.info = append(b.info, VolumeInfo{Volume: vol, Mount: vm}) + } + return +} + +func (b *Builder) AddVolume(config *watchers.ConfigOrSecret, volumeName string) string { + vol, vm := buildVolumeAndMount(config.Type, config.Name, volumeName) + b.info = append(b.info, VolumeInfo{Volume: vol, Mount: vm}) + return "/var/" + volumeName +} + +// AddToken will add a volume + volume mount for a service account token if defined +func (b *Builder) AddToken(name string) string { + vol := corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + ServiceAccountToken: &corev1.ServiceAccountTokenProjection{ + Path: name, + }, + }, + }, + }, + }, + } + vm := corev1.VolumeMount{ + MountPath: constants.TokensPath, + Name: name, + } + b.info = append(b.info, VolumeInfo{Volume: vol, Mount: vm}) + return constants.TokensPath + name +} + +func (b *Builder) GetVolumes() 
[]corev1.Volume { + var vols []corev1.Volume + for i := range b.info { + vols = append(vols, b.info[i].Volume) + } + return vols +} + +func (b *Builder) GetMounts() []corev1.VolumeMount { + var vols []corev1.VolumeMount + for i := range b.info { + vols = append(vols, b.info[i].Mount) + } + return vols +} + +func (b *Builder) AppendVolumes(existing []corev1.Volume) []corev1.Volume { + return append(existing, b.GetVolumes()...) +} + +func (b *Builder) AppendMounts(existing []corev1.VolumeMount) []corev1.VolumeMount { + return append(existing, b.GetMounts()...) +} + +func buildVolumeAndMount(refType flowslatest.MountableType, refName string, volumeName string) (corev1.Volume, corev1.VolumeMount) { + var vol corev1.Volume + if refType == flowslatest.RefTypeConfigMap { + vol = corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: refName, + }, + }, + }, + } + } else { + vol = corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: refName, + }, + }, + } + } + return vol, corev1.VolumeMount{ + Name: volumeName, + ReadOnly: true, + MountPath: "/var/" + volumeName, + } +} diff --git a/pkg/watchers/certificates_watcher.go b/pkg/watchers/certificates_watcher.go deleted file mode 100644 index 2173c2c0b..000000000 --- a/pkg/watchers/certificates_watcher.go +++ /dev/null @@ -1,117 +0,0 @@ -package watchers - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - flowlatest "github.com/netobserv/network-observability-operator/api/v1beta1" - 
"github.com/netobserv/network-observability-operator/controllers/constants" -) - -type CertificatesWatcher struct { - watched map[string]watchedObject - namespace string -} - -func NewCertificatesWatcher() CertificatesWatcher { - return CertificatesWatcher{watched: make(map[string]watchedObject)} -} - -type watchedObject struct { - nsName types.NamespacedName - kind string -} - -func RegisterCertificatesWatcher(builder *builder.Builder) *CertificatesWatcher { - watcher := NewCertificatesWatcher() - builder.Watches( - &source.Kind{Type: &corev1.Secret{}}, - handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { - if watcher.isWatched(flowlatest.CertRefTypeSecret, o) { - // Trigger FlowCollector reconcile - return []reconcile.Request{{NamespacedName: constants.FlowCollectorName}} - } - return []reconcile.Request{} - }), - ) - builder.Watches( - &source.Kind{Type: &corev1.ConfigMap{}}, - handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { - if watcher.isWatched(flowlatest.CertRefTypeConfigMap, o) { - // Trigger FlowCollector reconcile - return []reconcile.Request{{NamespacedName: constants.FlowCollectorName}} - } - return []reconcile.Request{} - }), - ) - return &watcher -} - -func (w *CertificatesWatcher) Reset(namespace string) { - w.namespace = namespace - w.watched = make(map[string]watchedObject) -} - -func (w *CertificatesWatcher) SetWatchedCertificate(key string, ref *flowlatest.CertificateReference) { - w.watched[key] = watchedObject{ - nsName: types.NamespacedName{ - Name: ref.Name, - Namespace: w.namespace, - }, - kind: ref.Type, - } -} - -func (w *CertificatesWatcher) isWatched(kind string, o client.Object) bool { - for _, watched := range w.watched { - if watched.kind == kind && watched.nsName.Name == o.GetName() && watched.nsName.Namespace == o.GetNamespace() { - return true - } - } - return false -} - -func (w *CertificatesWatcher) AnnotatePod(ctx context.Context, cl client.Client, pod 
*corev1.PodTemplateSpec, keyPrefixes ...string) error { - for _, keyPrefix := range keyPrefixes { - caName := constants.CertCAName(keyPrefix) - if err := w.AnnotatePodSingleVolume(ctx, cl, pod, caName); err != nil { - return err - } - userName := constants.CertUserName(keyPrefix) - if err := w.AnnotatePodSingleVolume(ctx, cl, pod, userName); err != nil { - return err - } - } - return nil -} - -func (w *CertificatesWatcher) AnnotatePodSingleVolume(ctx context.Context, cl client.Client, pod *corev1.PodTemplateSpec, key string) error { - if watched, ok := w.watched[key]; ok { - var sourceMeta *metav1.ObjectMeta - if watched.kind == flowlatest.CertRefTypeConfigMap { - var cm corev1.ConfigMap - err := cl.Get(ctx, watched.nsName, &cm) - if err != nil { - return err - } - sourceMeta = &cm.ObjectMeta - } else { - var s corev1.Secret - err := cl.Get(ctx, watched.nsName, &s) - if err != nil { - return err - } - sourceMeta = &s.ObjectMeta - } - pod.Annotations[constants.PodCertIDSuffix+key] = string(sourceMeta.GetUID()) + "/" + sourceMeta.GetResourceVersion() - } - return nil -} diff --git a/pkg/watchers/certificates_watcher_test.go b/pkg/watchers/certificates_watcher_test.go deleted file mode 100644 index 5556322cc..000000000 --- a/pkg/watchers/certificates_watcher_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package watchers - -import ( - "context" - "testing" - - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type ClientMock struct { - mock.Mock - client.Client -} - -func (o *ClientMock) Get(ctx context.Context, nsname types.NamespacedName, obj client.Object) error { - args := o.Called(ctx, nsname, obj) - return args.Error(0) -} - -func (o *ClientMock) mockObject(name, 
ns string, meta *v1.ObjectMeta) { - o.On("Get", mock.Anything, types.NamespacedName{Namespace: ns, Name: name}, mock.Anything).Run(func(args mock.Arguments) { - arg := args.Get(2).(client.Object) - arg.SetUID(meta.GetUID()) - arg.SetResourceVersion(meta.GetResourceVersion()) - }).Return(nil) -} - -var lokiCA = corev1.ConfigMap{ - ObjectMeta: v1.ObjectMeta{ - UID: "abcd", - ResourceVersion: "1234", - }, -} -var kafkaCA = corev1.ConfigMap{ - ObjectMeta: v1.ObjectMeta{ - UID: "efg", - ResourceVersion: "567", - }, -} -var kafkaUser = corev1.Secret{ - ObjectMeta: v1.ObjectMeta{ - UID: "hij", - ResourceVersion: "890", - }, -} - -func TestWatchingCertificates(t *testing.T) { - assert := assert.New(t) - clientMock := ClientMock{} - clientMock.mockObject("loki-ca", "ns", &lokiCA.ObjectMeta) - clientMock.mockObject("kafka-ca", "ns", &kafkaCA.ObjectMeta) - clientMock.mockObject("kafka-user", "ns", &kafkaUser.ObjectMeta) - - builder := builder.Builder{} - watcher := RegisterCertificatesWatcher(&builder) - assert.NotNil(watcher) - - watcher.Reset("ns") - watcher.SetWatchedCertificate("loki-certificate-ca", &flowslatest.CertificateReference{ - Type: flowslatest.CertRefTypeConfigMap, - Name: "loki-ca", - CertFile: "ca.crt", - }) - - // isWatched only true for loki-ca in namespace ns - assert.True(watcher.isWatched(flowslatest.CertRefTypeConfigMap, &corev1.ConfigMap{ObjectMeta: v1.ObjectMeta{Name: "loki-ca", Namespace: "ns"}})) - assert.False(watcher.isWatched(flowslatest.CertRefTypeConfigMap, &corev1.ConfigMap{ObjectMeta: v1.ObjectMeta{Name: "loki-ca-other", Namespace: "ns"}})) - assert.False(watcher.isWatched(flowslatest.CertRefTypeConfigMap, &corev1.ConfigMap{ObjectMeta: v1.ObjectMeta{Name: "loki-ca", Namespace: "other-ns"}})) - assert.False(watcher.isWatched(flowslatest.CertRefTypeSecret, &corev1.Secret{ObjectMeta: v1.ObjectMeta{Name: "loki-ca", Namespace: "ns"}})) - - pod := corev1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{ - Annotations: map[string]string{}, - }, - } - 
err := watcher.AnnotatePod(context.Background(), &clientMock, &pod, "loki-certificate", "kafka-certificate") - assert.NoError(err) - - // Pod annotated with info from the loki-ca configmap - assert.Equal(map[string]string{"flows.netobserv.io/cert-loki-certificate-ca": "abcd/1234"}, pod.Annotations) - - watcher.SetWatchedCertificate("kafka-certificate-ca", &flowslatest.CertificateReference{ - Type: flowslatest.CertRefTypeConfigMap, - Name: "kafka-ca", - CertFile: "ca.crt", - }) - - watcher.SetWatchedCertificate("kafka-certificate-user", &flowslatest.CertificateReference{ - Type: flowslatest.CertRefTypeSecret, - Name: "kafka-user", - CertFile: "user.crt", - CertKey: "user.key", - }) - - err = watcher.AnnotatePod(context.Background(), &clientMock, &pod, "loki-certificate", "kafka-certificate") - assert.NoError(err) - - // Pod annotated with info from the kafka-ca configmap and kafka-user secret - assert.Equal(map[string]string{ - "flows.netobserv.io/cert-loki-certificate-ca": "abcd/1234", - "flows.netobserv.io/cert-kafka-certificate-ca": "efg/567", - "flows.netobserv.io/cert-kafka-certificate-user": "hij/890", - }, pod.Annotations) - - kafkaUser.SetResourceVersion("xxx") - err = watcher.AnnotatePod(context.Background(), &clientMock, &pod, "loki-certificate", "kafka-certificate") - assert.NoError(err) - - // kafka-user secret updated with the new annotation - assert.Equal(map[string]string{ - "flows.netobserv.io/cert-loki-certificate-ca": "abcd/1234", - "flows.netobserv.io/cert-kafka-certificate-ca": "efg/567", - "flows.netobserv.io/cert-kafka-certificate-user": "hij/xxx", - }, pod.Annotations) - -} diff --git a/pkg/watchers/object_ref.go b/pkg/watchers/object_ref.go new file mode 100644 index 000000000..f1b5f431d --- /dev/null +++ b/pkg/watchers/object_ref.go @@ -0,0 +1,35 @@ +package watchers + +import ( + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" +) + +type objectRef struct { + kind flowslatest.MountableType + name string + 
namespace string + keys []string +} + +type ConfigOrSecret struct { + Type flowslatest.MountableType + Name string + Namespace string +} + +func (w *Watcher) refFromCert(cert *flowslatest.CertificateReference) objectRef { + ns := cert.Namespace + if ns == "" { + ns = w.defaultNamespace + } + keys := []string{cert.CertFile} + if cert.CertKey != "" { + keys = append(keys, cert.CertKey) + } + return objectRef{ + kind: cert.Type, + name: cert.Name, + namespace: ns, + keys: keys, + } +} diff --git a/pkg/watchers/watchables.go b/pkg/watchers/watchables.go new file mode 100644 index 000000000..921f10c45 --- /dev/null +++ b/pkg/watchers/watchables.go @@ -0,0 +1,82 @@ +package watchers + +import ( + "encoding/base64" + "encoding/json" + "hash/fnv" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Watchable interface { + ProvidePlaceholder() client.Object + GetDigest(client.Object, []string) (string, error) + PrepareForCreate(client.Object, *metav1.ObjectMeta) + PrepareForUpdate(client.Object, client.Object) +} + +type SecretWatchable struct { + Watchable +} + +func (w *SecretWatchable) ProvidePlaceholder() client.Object { + return &corev1.Secret{} +} + +func (w *SecretWatchable) GetDigest(obj client.Object, keys []string) (string, error) { + secret := obj.(*corev1.Secret) + return getDigest(keys, func(k string) interface{} { + return secret.Data[k] + }) +} + +func (w *SecretWatchable) PrepareForCreate(obj client.Object, m *metav1.ObjectMeta) { + fromSecret := obj.(*corev1.Secret) + fromSecret.ObjectMeta = *m +} + +func (w *SecretWatchable) PrepareForUpdate(from, to client.Object) { + fromSecret := from.(*corev1.Secret) + toSecret := to.(*corev1.Secret) + toSecret.Data = fromSecret.Data +} + +type ConfigWatchable struct { + Watchable +} + +func (w *ConfigWatchable) ProvidePlaceholder() client.Object { + return &corev1.ConfigMap{} +} + +func (w *ConfigWatchable) GetDigest(obj client.Object, 
keys []string) (string, error) { + cm := obj.(*corev1.ConfigMap) + return getDigest(keys, func(k string) interface{} { + return cm.Data[k] + }) +} + +func (w *ConfigWatchable) PrepareForCreate(obj client.Object, m *metav1.ObjectMeta) { + fromSecret := obj.(*corev1.ConfigMap) + fromSecret.ObjectMeta = *m +} + +func (w *ConfigWatchable) PrepareForUpdate(from, to client.Object) { + fromSecret := from.(*corev1.ConfigMap) + toSecret := to.(*corev1.ConfigMap) + toSecret.Data = fromSecret.Data +} + +func getDigest(keys []string, chunker func(string) interface{}) (string, error) { + // Inspired from https://github.com/openshift/library-go/blob/master/pkg/operator/resource/resourcehash/as_configmap.go + hasher := fnv.New32() + encoder := json.NewEncoder(hasher) + for _, k := range keys { + if err := encoder.Encode(chunker(k)); err != nil { + return "", err + } + } + return base64.URLEncoding.EncodeToString(hasher.Sum(nil)), nil +} diff --git a/pkg/watchers/watcher.go b/pkg/watchers/watcher.go new file mode 100644 index 000000000..ca3c76568 --- /dev/null +++ b/pkg/watchers/watcher.go @@ -0,0 +1,157 @@ +package watchers + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + rec "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + "github.com/netobserv/network-observability-operator/controllers/constants" + "github.com/netobserv/network-observability-operator/pkg/helper" +) + +type Watcher struct { + watched map[string]interface{} + defaultNamespace string + secrets SecretWatchable + configs ConfigWatchable +} + +func NewWatcher() Watcher { + return Watcher{ + watched: 
make(map[string]interface{}), + } +} + +func RegisterWatcher(builder *builder.Builder) *Watcher { + w := NewWatcher() + w.registerWatches(builder, &w.secrets, flowslatest.RefTypeSecret) + w.registerWatches(builder, &w.configs, flowslatest.RefTypeConfigMap) + return &w +} + +func (w *Watcher) registerWatches(builder *builder.Builder, watchable Watchable, kind flowslatest.MountableType) { + builder.Watches( + &source.Kind{Type: watchable.ProvidePlaceholder()}, + handler.EnqueueRequestsFromMapFunc(func(o client.Object) []rec.Request { + if w.isWatched(kind, o.GetName(), o.GetNamespace()) { + // Trigger FlowCollector reconcile + return []rec.Request{{NamespacedName: constants.FlowCollectorName}} + } + return []rec.Request{} + }), + ) +} + +func (w *Watcher) Reset(namespace string) { + w.defaultNamespace = namespace + w.watched = make(map[string]interface{}) +} + +func key(kind flowslatest.MountableType, name, namespace string) string { + return string(kind) + "/" + namespace + "/" + name +} + +func (w *Watcher) watch(kind flowslatest.MountableType, name, namespace string) { + w.watched[key(kind, name, namespace)] = true +} + +func (w *Watcher) isWatched(kind flowslatest.MountableType, name, namespace string) bool { + if _, ok := w.watched[key(kind, name, namespace)]; ok { + return true + } + return false +} + +func (w *Watcher) ProcessMTLSCerts(ctx context.Context, cl helper.Client, tls *flowslatest.ClientTLS, targetNamespace string) (caDigest string, userDigest string, err error) { + if tls.Enable && tls.CACert.Name != "" { + caRef := w.refFromCert(&tls.CACert) + caDigest, err = w.reconcile(ctx, cl, caRef, targetNamespace) + if err != nil { + return "", "", err + } + } + if tls.Enable && tls.UserCert.Name != "" { + userRef := w.refFromCert(&tls.UserCert) + userDigest, err = w.reconcile(ctx, cl, userRef, targetNamespace) + if err != nil { + return "", "", err + } + } + return caDigest, userDigest, nil +} + +func (w *Watcher) ProcessCACert(ctx context.Context, cl 
helper.Client, tls *flowslatest.ClientTLS, targetNamespace string) (caDigest string, err error) { + if tls.Enable && tls.CACert.Name != "" { + caRef := w.refFromCert(&tls.CACert) + caDigest, err = w.reconcile(ctx, cl, caRef, targetNamespace) + if err != nil { + return "", err + } + } + return caDigest, nil +} + +func (w *Watcher) reconcile(ctx context.Context, cl helper.Client, ref objectRef, destNamespace string) (string, error) { + rlog := log.FromContext(ctx, "Name", ref.name, "Source namespace", ref.namespace, "Target namespace", destNamespace) + + w.watch(ref.kind, ref.name, ref.namespace) + var watchable Watchable + if ref.kind == flowslatest.RefTypeConfigMap { + watchable = &w.configs + } else { + watchable = &w.secrets + } + + obj := watchable.ProvidePlaceholder() + err := cl.Get(ctx, types.NamespacedName{Name: ref.name, Namespace: ref.namespace}, obj) + if err != nil { + return "", err + } + digest, err := watchable.GetDigest(obj, ref.keys) + if err != nil { + return "", err + } + if ref.namespace != destNamespace { + // copy to namespace + target := watchable.ProvidePlaceholder() + err := cl.Get(ctx, types.NamespacedName{Name: ref.name, Namespace: destNamespace}, target) + if err != nil { + if !errors.IsNotFound(err) { + return "", err + } + rlog.Info(fmt.Sprintf("creating %s %s in namespace %s", ref.kind, ref.name, destNamespace)) + watchable.PrepareForCreate(obj, &metav1.ObjectMeta{ + Name: ref.name, + Namespace: destNamespace, + Annotations: map[string]string{ + constants.NamespaceCopyAnnotation: ref.namespace + "/" + ref.name, + }, + }) + if err := cl.CreateOwned(ctx, obj); err != nil { + return "", err + } + } else { + // Update existing + rlog.Info(fmt.Sprintf("updating %s %s in namespace %s", ref.kind, ref.name, destNamespace)) + watchable.PrepareForUpdate(obj, target) + if err := cl.UpdateOwned(ctx, target, target); err != nil { + return "", err + } + } + } + return digest, nil +} + +func Annotation(key string) string { + return 
constants.PodWatchedSuffix + key +} diff --git a/pkg/watchers/watcher_test.go b/pkg/watchers/watcher_test.go new file mode 100644 index 000000000..a95128fde --- /dev/null +++ b/pkg/watchers/watcher_test.go @@ -0,0 +1,209 @@ +package watchers + +import ( + "context" + "testing" + + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + "github.com/netobserv/network-observability-operator/pkg/helper" + "github.com/netobserv/network-observability-operator/pkg/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/builder" +) + +const baseNamespace = "base-ns" +const otherNamespace = "other-ns" + +var lokiCA = corev1.ConfigMap{ + ObjectMeta: v1.ObjectMeta{ + Name: "loki-ca", + Namespace: baseNamespace, + }, + Data: map[string]string{ + "tls.crt": " -- LOKI CA --", + }, +} +var lokiTLS = flowslatest.ClientTLS{ + Enable: true, + CACert: flowslatest.CertificateReference{ + Type: flowslatest.RefTypeConfigMap, + Name: lokiCA.Name, + CertFile: "tls.crt", + }, +} +var otherLokiCA = corev1.ConfigMap{ + ObjectMeta: v1.ObjectMeta{ + Name: "other-loki-ca", + Namespace: otherNamespace, + }, + Data: map[string]string{ + "tls.crt": " -- LOKI OTHER CA --", + }, +} +var otherLokiTLS = flowslatest.ClientTLS{ + Enable: true, + CACert: flowslatest.CertificateReference{ + Type: flowslatest.RefTypeConfigMap, + Name: otherLokiCA.Name, + Namespace: otherLokiCA.Namespace, + CertFile: "tls.crt", + }, +} +var kafkaCA = corev1.ConfigMap{ + ObjectMeta: v1.ObjectMeta{ + Name: "kafka-ca", + Namespace: baseNamespace, + }, + Data: map[string]string{ + "ca.crt": " -- KAFKA CA --", + }, +} +var kafkaUser = corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "kafka-user", + Namespace: baseNamespace, + }, + Data: map[string][]byte{ + "user.crt": []byte(" -- KAFKA USER CERT --"), + "user.key": []byte(" -- KAFKA 
USER KEY --"), + }, +} +var kafkaMTLS = flowslatest.ClientTLS{ + Enable: true, + CACert: flowslatest.CertificateReference{ + Type: flowslatest.RefTypeConfigMap, + Name: kafkaCA.Name, + CertFile: "ca.crt", + }, + UserCert: flowslatest.CertificateReference{ + Type: flowslatest.RefTypeSecret, + Name: kafkaUser.Name, + CertFile: "user.crt", + CertKey: "user.key", + }, +} +var kafkaSaslSecret = corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "kafka-sasl", + Namespace: baseNamespace, + }, + Data: map[string][]byte{ + "id": []byte("me"), + "token": []byte("ssssaaaaassssslllll"), + }, +} + +func TestGenDigests(t *testing.T) { + assert := assert.New(t) + clientMock := test.ClientMock{} + clientMock.MockConfigMap(&lokiCA) + clientMock.MockConfigMap(&kafkaCA) + clientMock.MockSecret(&kafkaUser) + clientMock.MockSecret(&kafkaSaslSecret) + + builder := builder.Builder{} + watcher := RegisterWatcher(&builder) + assert.NotNil(watcher) + watcher.Reset(baseNamespace) + cl := helper.UnmanagedClient(&clientMock) + + digLoki, err := watcher.ProcessCACert(context.Background(), cl, &lokiTLS, baseNamespace) + assert.NoError(err) + assert.Equal("XDamCg==", digLoki) + + // Same output expected from MTLS func + dig1, dig2, err := watcher.ProcessMTLSCerts(context.Background(), cl, &lokiTLS, baseNamespace) + assert.NoError(err) + assert.Equal("XDamCg==", dig1) + assert.Equal("", dig2) + + // Different output for kafka certs + dig1, dig2, err = watcher.ProcessMTLSCerts(context.Background(), cl, &kafkaMTLS, baseNamespace) + assert.NoError(err) + assert.Equal("EPFv4Q==", dig1) + assert.Equal("bNKS0Q==", dig2) + + // Update object, verify the digest has changed + copy := lokiCA + copy.Data["tls.crt"] = " -- LOKI CA MODIFIED --" + clientMock.UpdateObject(©) + + digUpdated, err := watcher.ProcessCACert(context.Background(), cl, &lokiTLS, baseNamespace) + assert.NoError(err) + assert.NotEqual(digLoki, digUpdated) + assert.Equal("Hb65OQ==", digUpdated) + + // Update another key in object, verify 
the digest hasn't changed + copy.Data["other"] = " -- OTHER --" + clientMock.UpdateObject(©) + + digUpdated2, err := watcher.ProcessCACert(context.Background(), cl, &lokiTLS, baseNamespace) + assert.NoError(err) + assert.Equal(digUpdated2, digUpdated) +} + +func TestNoCopy(t *testing.T) { + assert := assert.New(t) + clientMock := test.ClientMock{} + clientMock.MockConfigMap(&lokiCA) + + builder := builder.Builder{} + watcher := RegisterWatcher(&builder) + assert.NotNil(watcher) + watcher.Reset(baseNamespace) + cl := helper.UnmanagedClient(&clientMock) + + _, _, err := watcher.ProcessMTLSCerts(context.Background(), cl, &lokiTLS, baseNamespace) + assert.NoError(err) + clientMock.AssertCalled(t, "Get", mock.Anything, types.NamespacedName{Name: lokiCA.Name, Namespace: lokiCA.Namespace}, mock.Anything) + clientMock.AssertNotCalled(t, "Create") + clientMock.AssertNotCalled(t, "Update") +} + +func TestCopyCertificate(t *testing.T) { + assert := assert.New(t) + clientMock := test.ClientMock{} + clientMock.MockConfigMap(&otherLokiCA) + clientMock.MockNonExisting(types.NamespacedName{Namespace: baseNamespace, Name: otherLokiCA.Name}) + clientMock.MockCreateUpdate() + + builder := builder.Builder{} + watcher := RegisterWatcher(&builder) + assert.NotNil(watcher) + watcher.Reset(baseNamespace) + cl := helper.UnmanagedClient(&clientMock) + + _, _, err := watcher.ProcessMTLSCerts(context.Background(), cl, &otherLokiTLS, baseNamespace) + assert.NoError(err) + clientMock.AssertCalled(t, "Get", mock.Anything, types.NamespacedName{Name: otherLokiCA.Name, Namespace: otherLokiCA.Namespace}, mock.Anything) + clientMock.AssertCalled(t, "Get", mock.Anything, types.NamespacedName{Name: otherLokiCA.Name, Namespace: baseNamespace}, mock.Anything) + clientMock.AssertCalled(t, "Create", mock.Anything, mock.Anything, mock.Anything) + clientMock.AssertNotCalled(t, "Update") +} + +func TestUpdateCertificate(t *testing.T) { + assert := assert.New(t) + clientMock := test.ClientMock{} + 
clientMock.MockConfigMap(&otherLokiCA) + copied := otherLokiCA + copied.Namespace = baseNamespace + clientMock.MockConfigMap(&copied) + clientMock.MockCreateUpdate() + + builder := builder.Builder{} + watcher := RegisterWatcher(&builder) + assert.NotNil(watcher) + watcher.Reset(baseNamespace) + cl := helper.UnmanagedClient(&clientMock) + + _, _, err := watcher.ProcessMTLSCerts(context.Background(), cl, &otherLokiTLS, baseNamespace) + assert.NoError(err) + clientMock.AssertCalled(t, "Get", mock.Anything, types.NamespacedName{Name: otherLokiCA.Name, Namespace: otherLokiCA.Namespace}, mock.Anything) + clientMock.AssertCalled(t, "Get", mock.Anything, types.NamespacedName{Name: otherLokiCA.Name, Namespace: baseNamespace}, mock.Anything) + clientMock.AssertNotCalled(t, "Create") + clientMock.AssertCalled(t, "Update", mock.Anything, mock.Anything, mock.Anything) +}