From 6f4c39dab994685ce0c24d72e3856b8573dc4993 Mon Sep 17 00:00:00 2001 From: acmenezes Date: Fri, 16 Jun 2023 16:30:30 -0400 Subject: [PATCH] ADD v1beta2 with new Loki integration fields Signed-off-by: acmenezes --- Makefile | 7 + api/v1alpha1/doc.go | 2 +- api/v1alpha1/flowcollector_types.go | 1 + api/v1alpha1/flowcollector_webhook.go | 61 +- api/v1alpha1/zz_generated.conversion.go | 691 ++- api/v1beta1/doc.go | 1 + api/v1beta1/flowcollector_types.go | 2 +- api/v1beta1/flowcollector_webhook.go | 108 +- api/v1beta1/groupversion_info.go | 3 +- api/v1beta1/zz_generated.conversion.go | 1024 ++++ api/v1beta2/doc.go | 15 + api/v1beta2/flowcollector_types.go | 787 ++++ api/v1beta2/flowcollector_webhook.go | 32 + api/v1beta2/groupversion_info.go | 36 + api/v1beta2/zz_generated.deepcopy.go | 614 +++ .../flows.netobserv.io_flowcollectors.yaml | 2351 ++++++++++ .../samples/flows_v1beta2_flowcollector.yaml | 147 + .../consoleplugin/consoleplugin_objects.go | 10 +- .../consoleplugin/consoleplugin_reconciler.go | 16 +- .../consoleplugin/consoleplugin_test.go | 24 +- controllers/ebpf/agent_controller.go | 2 +- .../ebpf/internal/permissions/permissions.go | 2 +- controllers/flowcollector_controller.go | 2 +- ...wcollector_controller_certificates_test.go | 6 +- .../flowcollector_controller_console_test.go | 6 +- .../flowcollector_controller_ebpf_test.go | 2 +- .../flowcollector_controller_iso_test.go | 6 +- controllers/flowcollector_controller_test.go | 8 +- .../flowlogspipeline/flp_common_objects.go | 26 +- .../flowlogspipeline/flp_ingest_objects.go | 2 +- .../flowlogspipeline/flp_ingest_reconciler.go | 2 +- .../flowlogspipeline/flp_monolith_objects.go | 2 +- .../flp_monolith_reconciler.go | 4 +- .../flowlogspipeline/flp_reconciler.go | 2 +- controllers/flowlogspipeline/flp_test.go | 22 +- .../flowlogspipeline/flp_transfo_objects.go | 2 +- .../flp_transfo_reconciler.go | 4 +- controllers/ovs/flowsconfig_cno_reconciler.go | 2 +- .../ovs/flowsconfig_ovnk_reconciler.go | 2 +- controllers/ovs/flowsconfig_types.go | 2 +- controllers/suite_test.go | 4 + docs/FlowCollector.md | 4144 +++++++++++++++++ main.go | 4 +- pkg/helper/comparators.go | 2 +- pkg/helper/flowcollector.go | 14 +- pkg/volumes/builder.go | 2 +- pkg/watchers/object_ref.go | 2 +- pkg/watchers/watcher.go | 2 +- pkg/watchers/watcher_test.go | 2 +- 49 files changed, 9722 insertions(+), 492 deletions(-) create mode 100644 api/v1beta1/zz_generated.conversion.go create mode 100644 api/v1beta2/doc.go create mode 100644 api/v1beta2/flowcollector_types.go create mode 100644 api/v1beta2/flowcollector_webhook.go create mode 100644 api/v1beta2/groupversion_info.go create mode 100644 api/v1beta2/zz_generated.deepcopy.go create mode 100644 config/samples/flows_v1beta2_flowcollector.yaml diff --git a/Makefile b/Makefile index 41aa53d8a..bf21fe13a 100644 --- a/Makefile +++ b/Makefile @@ -249,6 +249,13 @@ generate-go-conversions: $(CONVERSION_GEN) ## Run all generate-go-conversions --output-file-base=zz_generated.conversion \ $(CONVERSION_GEN_OUTPUT_BASE) \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt + $(MAKE) clean-generated-conversions SRC_DIRS="./api/v1beta1" + $(CONVERSION_GEN) \ + --input-dirs=./api/v1beta1 \ + --build-tag=ignore_autogenerated_core \ + --output-file-base=zz_generated.conversion \ + $(CONVERSION_GEN_OUTPUT_BASE) \ + --go-header-file=./hack/boilerplate/boilerplate.generatego.txt generate: gencode manifests doc generate-go-conversions ## Run all code/file generators diff --git a/api/v1alpha1/doc.go b/api/v1alpha1/doc.go 
index c49908aa2..c0105103f 100644 --- a/api/v1alpha1/doc.go +++ b/api/v1alpha1/doc.go @@ -12,5 +12,5 @@ limitations under the License. */ // Package v1alpha1 contains the v1alpha1 API implementation. -// +k8s:conversion-gen=github.com/netobserv/network-observability-operator/api/v1beta1 +// +k8s:conversion-gen=github.com/netobserv/network-observability-operator/api/v1beta2 package v1alpha1 diff --git a/api/v1alpha1/flowcollector_types.go b/api/v1alpha1/flowcollector_types.go index 36613224c..dc9ae8aaa 100644 --- a/api/v1alpha1/flowcollector_types.go +++ b/api/v1alpha1/flowcollector_types.go @@ -55,6 +55,7 @@ type FlowCollectorSpec struct { Processor FlowCollectorFLP `json:"processor,omitempty"` // loki, the flow store, client settings. + // +k8s:conversion-gen=false Loki FlowCollectorLoki `json:"loki,omitempty"` // consolePlugin defines the settings related to the OpenShift Console plugin, when available. diff --git a/api/v1alpha1/flowcollector_webhook.go b/api/v1alpha1/flowcollector_webhook.go index b8bf992f5..736726755 100644 --- a/api/v1alpha1/flowcollector_webhook.go +++ b/api/v1alpha1/flowcollector_webhook.go @@ -20,26 +20,26 @@ import ( "fmt" "reflect" - "github.com/netobserv/network-observability-operator/api/v1beta1" + "github.com/netobserv/network-observability-operator/api/v1beta2" utilconversion "github.com/netobserv/network-observability-operator/pkg/conversion" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" ) -// ConvertTo converts this v1alpha1 FlowCollector to its v1beta1 equivalent (the conversion Hub) +// ConvertTo converts this v1alpha1 FlowCollector to its v1beta2 equivalent (the conversion Hub) // https://book.kubebuilder.io/multiversion-tutorial/conversion.html func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.FlowCollector) + dst := dstRaw.(*v1beta2.FlowCollector) - if err := Convert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector(r, dst, nil); err != nil { - return fmt.Errorf("copying v1alpha1.FlowCollector into v1beta1.FlowCollector: %w", err) + if err := Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(r, dst, nil); err != nil { + return fmt.Errorf("copying v1alpha1.FlowCollector into v1beta2.FlowCollector: %w", err) } dst.Status.Conditions = make([]v1.Condition, len(r.Status.Conditions)) copy(dst.Status.Conditions, r.Status.Conditions) // Manually restore data.
- restored := &v1beta1.FlowCollector{} + restored := &v1beta2.FlowCollector{} if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok { return err } @@ -62,7 +62,7 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Processor.Metrics.DisableAlerts = restored.Spec.Processor.Metrics.DisableAlerts } - dst.Spec.Loki.StatusTLS = restored.Spec.Loki.StatusTLS + dst.Spec.Loki.Manual = restored.Spec.Loki.Manual if restored.Spec.Exporters != nil { for _, restoredExp := range restored.Spec.Exporters { @@ -75,7 +75,7 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { return nil } -func isExporterIn(restoredExporter *v1beta1.FlowCollectorExporter, dstExporters []*v1beta1.FlowCollectorExporter) bool { +func isExporterIn(restoredExporter *v1beta2.FlowCollectorExporter, dstExporters []*v1beta2.FlowCollectorExporter) bool { for _, dstExp := range dstExporters { if reflect.DeepEqual(restoredExporter, dstExp) { @@ -85,12 +85,12 @@ func isExporterIn(restoredExporter *v1beta1.FlowCollectorExporter, dstExporters return false } -// ConvertFrom converts the hub version v1beta1 FlowCollector object to v1alpha1 +// ConvertFrom converts the hub version v1beta2 FlowCollector object to v1alpha1 func (r *FlowCollector) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.FlowCollector) + src := srcRaw.(*v1beta2.FlowCollector) - if err := Convert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector(src, r, nil); err != nil { - return fmt.Errorf("copying v1beta1.FlowCollector into v1alpha1.FlowCollector: %w", err) + if err := Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(src, r, nil); err != nil { + return fmt.Errorf("copying v1beta2.FlowCollector into v1alpha1.FlowCollector: %w", err) } r.Status.Conditions = make([]v1.Condition, len(src.Status.Conditions)) copy(r.Status.Conditions, src.Status.Conditions) @@ -103,39 +103,46 @@ func (r *FlowCollector) ConvertFrom(srcRaw conversion.Hub) error { } func (r *FlowCollectorList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.FlowCollectorList) - return Convert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList(r, dst, nil) + dst := dstRaw.(*v1beta2.FlowCollectorList) + return Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(r, dst, nil) } func (r *FlowCollectorList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.FlowCollectorList) - return Convert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList(src, r, nil) + src := srcRaw.(*v1beta2.FlowCollectorList) + return Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(src, r, nil) } // This function needs to be created manually because conversion-gen intentionally does not generate it, as -// we have new defined fields in v1beta1 not in v1alpha1 +// we have new defined fields in v1beta2 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta1.FlowCollectorFLP, out *FlowCollectorFLP, s apiconversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in, out, s) +func Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s apiconversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in, out, s) } // This function needs to be created manually because conversion-gen intentionally does not generate it, as -// we have new defined fields in v1beta1 not in v1alpha1 +// we have new defined fields in v1beta2 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta1_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta1.FLPMetrics, out *FLPMetrics, s apiconversion.Scope) error { - return autoConvert_v1beta1_FLPMetrics_To_v1alpha1_FLPMetrics(in, out, s) +func Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s apiconversion.Scope) error { + return autoConvert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in, out, s) } // This function needs to be created manually because conversion-gen intentionally does not generate it, as -// we have new defined fields in v1beta1 not in v1alpha1 +// we have new defined fields in v1beta2 not in v1alpha1 +// nolint:golint,stylecheck,revive +func Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s apiconversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in, out, s) +} + +// This function needs to be created manually because conversion-gen intentionally does not generate it, as +// we have new defined fields in v1beta2 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta1_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta1.FlowCollectorLoki, out *FlowCollectorLoki, s apiconversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in, out, s) +func Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s apiconversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in, out, s) } // This function needs to be created manually because conversion-gen intentionally does not generate it, as -// we have new defined fields in v1beta1 not in v1alpha1 +// we have new defined fields in v1beta2 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta1_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta1.FlowCollectorExporter, out *FlowCollectorExporter, s apiconversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in, out, s) +func Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s apiconversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in, out, s) } diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index 92e3095f6..b73d67e5e 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -24,7 +24,7 @@ package v1alpha1 import ( unsafe "unsafe" - v1beta1 "github.com/netobserv/network-observability-operator/api/v1beta1" + v1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2" v2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" @@ -38,241 +38,241 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*CertificateReference)(nil), (*v1beta1.CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(a.(*CertificateReference), b.(*v1beta1.CertificateReference), scope) + if err := s.AddGeneratedConversionFunc((*CertificateReference)(nil), (*v1beta2.CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(a.(*CertificateReference), b.(*v1beta2.CertificateReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.CertificateReference)(nil), (*CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(a.(*v1beta1.CertificateReference), b.(*CertificateReference), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.CertificateReference)(nil), (*CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(a.(*v1beta2.CertificateReference), b.(*CertificateReference), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ClientTLS)(nil), (*v1beta1.ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS(a.(*ClientTLS), b.(*v1beta1.ClientTLS), scope) + if err := s.AddGeneratedConversionFunc((*ClientTLS)(nil), (*v1beta2.ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(a.(*ClientTLS), b.(*v1beta2.ClientTLS), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ClientTLS)(nil), (*ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS(a.(*v1beta1.ClientTLS), b.(*ClientTLS), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ClientTLS)(nil), (*ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(a.(*v1beta2.ClientTLS), b.(*ClientTLS), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ClusterNetworkOperatorConfig)(nil), (*v1beta1.ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(a.(*ClusterNetworkOperatorConfig), b.(*v1beta1.ClusterNetworkOperatorConfig), scope) + if err := s.AddGeneratedConversionFunc((*ClusterNetworkOperatorConfig)(nil), (*v1beta2.ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(a.(*ClusterNetworkOperatorConfig), b.(*v1beta2.ClusterNetworkOperatorConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ClusterNetworkOperatorConfig)(nil), (*ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(a.(*v1beta1.ClusterNetworkOperatorConfig), b.(*ClusterNetworkOperatorConfig), scope) + if err := 
s.AddGeneratedConversionFunc((*v1beta2.ClusterNetworkOperatorConfig)(nil), (*ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(a.(*v1beta2.ClusterNetworkOperatorConfig), b.(*ClusterNetworkOperatorConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ConsolePluginPortConfig)(nil), (*v1beta1.ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(a.(*ConsolePluginPortConfig), b.(*v1beta1.ConsolePluginPortConfig), scope) + if err := s.AddGeneratedConversionFunc((*ConsolePluginPortConfig)(nil), (*v1beta2.ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(a.(*ConsolePluginPortConfig), b.(*v1beta2.ConsolePluginPortConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ConsolePluginPortConfig)(nil), (*ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(a.(*v1beta1.ConsolePluginPortConfig), b.(*ConsolePluginPortConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ConsolePluginPortConfig)(nil), (*ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(a.(*v1beta2.ConsolePluginPortConfig), b.(*ConsolePluginPortConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*DebugConfig)(nil), (*v1beta1.DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig(a.(*DebugConfig), b.(*v1beta1.DebugConfig), scope) + if err := s.AddGeneratedConversionFunc((*DebugConfig)(nil), (*v1beta2.DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(a.(*DebugConfig), b.(*v1beta2.DebugConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DebugConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig(a.(*v1beta1.DebugConfig), b.(*DebugConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DebugConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(a.(*v1beta2.DebugConfig), b.(*DebugConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FLPMetrics)(nil), (*v1beta1.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FLPMetrics_To_v1beta1_FLPMetrics(a.(*FLPMetrics), b.(*v1beta1.FLPMetrics), scope) + if err := s.AddGeneratedConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollector)(nil), (*v1beta1.FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector(a.(*FlowCollector), b.(*v1beta1.FlowCollector), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollector)(nil), (*v1beta2.FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(a.(*FlowCollector), b.(*v1beta2.FlowCollector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollector)(nil), (*FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector(a.(*v1beta1.FlowCollector), b.(*FlowCollector), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollector)(nil), (*FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(a.(*v1beta2.FlowCollector), b.(*FlowCollector), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorAgent)(nil), (*v1beta1.FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(a.(*FlowCollectorAgent), b.(*v1beta1.FlowCollectorAgent), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorAgent)(nil), (*v1beta2.FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(a.(*FlowCollectorAgent), b.(*v1beta2.FlowCollectorAgent), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorAgent)(nil), (*FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(a.(*v1beta1.FlowCollectorAgent), b.(*FlowCollectorAgent), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorAgent)(nil), (*FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(a.(*v1beta2.FlowCollectorAgent), b.(*FlowCollectorAgent), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta1.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta1.FlowCollectorConsolePlugin), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorConsolePlugin)(nil), (*FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(a.(*v1beta1.FlowCollectorConsolePlugin), b.(*FlowCollectorConsolePlugin), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorConsolePlugin)(nil), (*FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(a.(*v1beta2.FlowCollectorConsolePlugin), b.(*FlowCollectorConsolePlugin), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta1.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta1.FlowCollectorEBPF), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(a.(*v1beta1.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(a.(*v1beta2.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorExporter)(nil), (*v1beta1.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta1.FlowCollectorExporter), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta1.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta1.FlowCollectorFLP), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorHPA)(nil), (*v1beta1.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta1.FlowCollectorHPA), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(a.(*v1beta1.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(a.(*v1beta2.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta1.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta1.FlowCollectorIPFIX), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta2.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta2.FlowCollectorIPFIX), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(a.(*v1beta1.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(a.(*v1beta2.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorKafka)(nil), (*v1beta1.FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(a.(*FlowCollectorKafka), b.(*v1beta1.FlowCollectorKafka), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorKafka)(nil), (*v1beta2.FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(a.(*FlowCollectorKafka), b.(*v1beta2.FlowCollectorKafka), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorKafka)(nil), (*FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(a.(*v1beta1.FlowCollectorKafka), b.(*FlowCollectorKafka), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorKafka)(nil), (*FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(a.(*v1beta2.FlowCollectorKafka), b.(*FlowCollectorKafka), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorList)(nil), (*v1beta1.FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList(a.(*FlowCollectorList), b.(*v1beta1.FlowCollectorList), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorList)(nil), (*v1beta2.FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(a.(*FlowCollectorList), b.(*v1beta2.FlowCollectorList), scope) }); 
err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorList)(nil), (*FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList(a.(*v1beta1.FlowCollectorList), b.(*FlowCollectorList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorList)(nil), (*FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(a.(*v1beta2.FlowCollectorList), b.(*FlowCollectorList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorLoki)(nil), (*v1beta1.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta1.FlowCollectorLoki), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorSpec)(nil), (*v1beta2.FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(a.(*FlowCollectorSpec), b.(*v1beta2.FlowCollectorSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorSpec)(nil), (*v1beta1.FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(a.(*FlowCollectorSpec), b.(*v1beta1.FlowCollectorSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorSpec)(nil), (*FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(a.(*v1beta2.FlowCollectorSpec), b.(*FlowCollectorSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorSpec)(nil), (*FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(a.(*v1beta1.FlowCollectorSpec), b.(*FlowCollectorSpec), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorStatus)(nil), (*v1beta2.FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(a.(*FlowCollectorStatus), b.(*v1beta2.FlowCollectorStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorStatus)(nil), (*v1beta1.FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(a.(*FlowCollectorStatus), b.(*v1beta1.FlowCollectorStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorStatus)(nil), (*FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(a.(*v1beta2.FlowCollectorStatus), b.(*FlowCollectorStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.FlowCollectorStatus)(nil), (*FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(a.(*v1beta1.FlowCollectorStatus), b.(*FlowCollectorStatus), scope) + if err := s.AddGeneratedConversionFunc((*MetricsServerConfig)(nil), 
(*v1beta2.MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(a.(*MetricsServerConfig), b.(*v1beta2.MetricsServerConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*MetricsServerConfig)(nil), (*v1beta1.MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_MetricsServerConfig_To_v1beta1_MetricsServerConfig(a.(*MetricsServerConfig), b.(*v1beta1.MetricsServerConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.MetricsServerConfig)(nil), (*MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(a.(*v1beta2.MetricsServerConfig), b.(*MetricsServerConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.MetricsServerConfig)(nil), (*MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(a.(*v1beta1.MetricsServerConfig), b.(*MetricsServerConfig), scope) + if err := s.AddGeneratedConversionFunc((*OVNKubernetesConfig)(nil), (*v1beta2.OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(a.(*OVNKubernetesConfig), b.(*v1beta2.OVNKubernetesConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*OVNKubernetesConfig)(nil), (*v1beta1.OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(a.(*OVNKubernetesConfig), b.(*v1beta1.OVNKubernetesConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.OVNKubernetesConfig)(nil), (*OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(a.(*v1beta2.OVNKubernetesConfig), b.(*OVNKubernetesConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.OVNKubernetesConfig)(nil), (*OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(a.(*v1beta1.OVNKubernetesConfig), b.(*OVNKubernetesConfig), scope) + if err := s.AddGeneratedConversionFunc((*QuickFilter)(nil), (*v1beta2.QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(a.(*QuickFilter), b.(*v1beta2.QuickFilter), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*QuickFilter)(nil), (*v1beta1.QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_QuickFilter_To_v1beta1_QuickFilter(a.(*QuickFilter), b.(*v1beta1.QuickFilter), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.QuickFilter)(nil), (*QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(a.(*v1beta2.QuickFilter), b.(*QuickFilter), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.QuickFilter)(nil), (*QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta1_QuickFilter_To_v1alpha1_QuickFilter(a.(*v1beta1.QuickFilter), b.(*QuickFilter), scope) + if err := s.AddGeneratedConversionFunc((*ServerTLS)(nil), (*v1beta2.ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(a.(*ServerTLS), b.(*v1beta2.ServerTLS), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ServerTLS)(nil), (*v1beta1.ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ServerTLS_To_v1beta1_ServerTLS(a.(*ServerTLS), b.(*v1beta1.ServerTLS), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(a.(*v1beta2.ServerTLS), b.(*ServerTLS), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ServerTLS_To_v1alpha1_ServerTLS(a.(*v1beta1.ServerTLS), b.(*ServerTLS), scope) + if err := s.AddConversionFunc((*FlowCollectorLoki)(nil), (*v1beta2.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta2.FlowCollectorLoki), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.FLPMetrics)(nil), (*FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FLPMetrics_To_v1alpha1_FLPMetrics(a.(*v1beta1.FLPMetrics), b.(*FLPMetrics), scope) + if err := s.AddConversionFunc((*v1beta2.FLPMetrics)(nil), (*FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(a.(*v1beta2.FLPMetrics), b.(*FLPMetrics), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(a.(*v1beta1.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) + if err := s.AddConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(a.(*v1beta1.FlowCollectorFLP), b.(*FlowCollectorFLP), scope) + if err := s.AddConversionFunc((*v1beta2.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(a.(*v1beta2.FlowCollectorFLP), b.(*FlowCollectorFLP), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.FlowCollectorLoki)(nil), (*FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(a.(*v1beta1.FlowCollectorLoki), b.(*FlowCollectorLoki), scope) + if err := 
s.AddConversionFunc((*v1beta2.FlowCollectorLoki)(nil), (*FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(a.(*v1beta2.FlowCollectorLoki), b.(*FlowCollectorLoki), scope) }); err != nil { return err } return nil } -func autoConvert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(in *CertificateReference, out *v1beta1.CertificateReference, s conversion.Scope) error { - out.Type = v1beta1.MountableType(in.Type) +func autoConvert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(in *CertificateReference, out *v1beta2.CertificateReference, s conversion.Scope) error { + out.Type = v1beta2.MountableType(in.Type) out.Name = in.Name out.CertFile = in.CertFile out.CertKey = in.CertKey @@ -280,12 +280,12 @@ func autoConvert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(i return nil } -// Convert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference is an autogenerated conversion function. -func Convert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(in *CertificateReference, out *v1beta1.CertificateReference, s conversion.Scope) error { - return autoConvert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(in, out, s) +// Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference is an autogenerated conversion function. +func Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(in *CertificateReference, out *v1beta2.CertificateReference, s conversion.Scope) error { + return autoConvert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(in, out, s) } -func autoConvert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta1.CertificateReference, out *CertificateReference, s conversion.Scope) error { +func autoConvert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta2.CertificateReference, out *CertificateReference, s conversion.Scope) error { out.Type = MountableType(in.Type) out.Name = in.Name out.Namespace = in.Namespace @@ -294,66 +294,66 @@ func autoConvert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(i return nil } -// Convert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference is an autogenerated conversion function. -func Convert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta1.CertificateReference, out *CertificateReference, s conversion.Scope) error { - return autoConvert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(in, out, s) +// Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference is an autogenerated conversion function. 
+func Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta2.CertificateReference, out *CertificateReference, s conversion.Scope) error { + return autoConvert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(in, out, s) } -func autoConvert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS(in *ClientTLS, out *v1beta1.ClientTLS, s conversion.Scope) error { +func autoConvert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(in *ClientTLS, out *v1beta2.ClientTLS, s conversion.Scope) error { out.Enable = in.Enable out.InsecureSkipVerify = in.InsecureSkipVerify - if err := Convert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(&in.CACert, &out.CACert, s); err != nil { + if err := Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(&in.CACert, &out.CACert, s); err != nil { return err } - if err := Convert_v1alpha1_CertificateReference_To_v1beta1_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { + if err := Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { return err } return nil } -// Convert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS is an autogenerated conversion function. -func Convert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS(in *ClientTLS, out *v1beta1.ClientTLS, s conversion.Scope) error { - return autoConvert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS(in, out, s) +// Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS is an autogenerated conversion function. +func Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(in *ClientTLS, out *v1beta2.ClientTLS, s conversion.Scope) error { + return autoConvert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(in, out, s) } -func autoConvert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS(in *v1beta1.ClientTLS, out *ClientTLS, s conversion.Scope) error { +func autoConvert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(in *v1beta2.ClientTLS, out *ClientTLS, s conversion.Scope) error { out.Enable = in.Enable out.InsecureSkipVerify = in.InsecureSkipVerify - if err := Convert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(&in.CACert, &out.CACert, s); err != nil { + if err := Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(&in.CACert, &out.CACert, s); err != nil { return err } - if err := Convert_v1beta1_CertificateReference_To_v1alpha1_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { + if err := Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { return err } return nil } -// Convert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS is an autogenerated conversion function. -func Convert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS(in *v1beta1.ClientTLS, out *ClientTLS, s conversion.Scope) error { - return autoConvert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS(in, out, s) +// Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS is an autogenerated conversion function. 
+func Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(in *v1beta2.ClientTLS, out *ClientTLS, s conversion.Scope) error { + return autoConvert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(in, out, s) } -func autoConvert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta1.ClusterNetworkOperatorConfig, s conversion.Scope) error { +func autoConvert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta2.ClusterNetworkOperatorConfig, s conversion.Scope) error { out.Namespace = in.Namespace return nil } -// Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig is an autogenerated conversion function. -func Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta1.ClusterNetworkOperatorConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in, out, s) +// Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig is an autogenerated conversion function. +func Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta2.ClusterNetworkOperatorConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in, out, s) } -func autoConvert_v1beta1_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in *v1beta1.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { +func autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in *v1beta2.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { out.Namespace = in.Namespace return nil } -// Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig is an autogenerated conversion function. -func Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in *v1beta1.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in, out, s) +// Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig is an autogenerated conversion function. +func Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in *v1beta2.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { + return autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in, out, s) } -func autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta1.ConsolePluginPortConfig, s conversion.Scope) error { +func autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { if err := v1.Convert_bool_To_Pointer_bool(&in.Enable, &out.Enable, s); err != nil { return err } @@ -361,12 +361,12 @@ func autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortCo return nil } -// Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig is an autogenerated conversion function. 
-func Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta1.ConsolePluginPortConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in, out, s) +// Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig is an autogenerated conversion function. +func Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in, out, s) } -func autoConvert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in *v1beta1.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { +func autoConvert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in *v1beta2.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { if err := v1.Convert_Pointer_bool_To_bool(&in.Enable, &out.Enable, s); err != nil { return err } @@ -374,46 +374,46 @@ func autoConvert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortCo return nil } -// Convert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig is an autogenerated conversion function. -func Convert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in *v1beta1.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { - return autoConvert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in, out, s) +// Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig is an autogenerated conversion function. +func Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in *v1beta2.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { + return autoConvert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in, out, s) } -func autoConvert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig(in *DebugConfig, out *v1beta1.DebugConfig, s conversion.Scope) error { +func autoConvert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(in *DebugConfig, out *v1beta2.DebugConfig, s conversion.Scope) error { out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env)) return nil } -// Convert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig is an autogenerated conversion function. -func Convert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig(in *DebugConfig, out *v1beta1.DebugConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig(in, out, s) +// Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig is an autogenerated conversion function. +func Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(in *DebugConfig, out *v1beta2.DebugConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(in, out, s) } -func autoConvert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig(in *v1beta1.DebugConfig, out *DebugConfig, s conversion.Scope) error { +func autoConvert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(in *v1beta2.DebugConfig, out *DebugConfig, s conversion.Scope) error { out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env)) return nil } -// Convert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig is an autogenerated conversion function. 
-func Convert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig(in *v1beta1.DebugConfig, out *DebugConfig, s conversion.Scope) error { - return autoConvert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig(in, out, s) +// Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig is an autogenerated conversion function. +func Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(in *v1beta2.DebugConfig, out *DebugConfig, s conversion.Scope) error { + return autoConvert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(in, out, s) } -func autoConvert_v1alpha1_FLPMetrics_To_v1beta1_FLPMetrics(in *FLPMetrics, out *v1beta1.FLPMetrics, s conversion.Scope) error { - if err := Convert_v1alpha1_MetricsServerConfig_To_v1beta1_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { +func autoConvert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error { + if err := Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { return err } out.IgnoreTags = *(*[]string)(unsafe.Pointer(&in.IgnoreTags)) return nil } -// Convert_v1alpha1_FLPMetrics_To_v1beta1_FLPMetrics is an autogenerated conversion function. -func Convert_v1alpha1_FLPMetrics_To_v1beta1_FLPMetrics(in *FLPMetrics, out *v1beta1.FLPMetrics, s conversion.Scope) error { - return autoConvert_v1alpha1_FLPMetrics_To_v1beta1_FLPMetrics(in, out, s) +// Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics is an autogenerated conversion function. +func Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error { + return autoConvert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in, out, s) } -func autoConvert_v1beta1_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta1.FLPMetrics, out *FLPMetrics, s conversion.Scope) error { - if err := Convert_v1beta1_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { +func autoConvert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s conversion.Scope) error { + if err := Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { return err } out.IgnoreTags = *(*[]string)(unsafe.Pointer(&in.IgnoreTags)) @@ -421,71 +421,71 @@ func autoConvert_v1beta1_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta1.FLPMetric return nil } -func autoConvert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector(in *FlowCollector, out *v1beta1.FlowCollector, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector(in *FlowCollector, out *v1beta1.FlowCollector, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector(in, out, s) +// Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(in, out, s) } -func autoConvert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta1.FlowCollector, out *FlowCollector, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta2.FlowCollector, out *FlowCollector, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector is an autogenerated conversion function. -func Convert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta1.FlowCollector, out *FlowCollector, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector(in, out, s) +// Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector is an autogenerated conversion function. +func Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta2.FlowCollector, out *FlowCollector, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta1.FlowCollectorAgent, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { out.Type = in.Type - if err := Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta1.FlowCollectorAgent, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in, out, s) +// Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent is an autogenerated conversion function. 
+func Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in, out, s) } -func autoConvert_v1beta1_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta1.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { out.Type = in.Type - if err := Convert_v1beta1_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { + if err := Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { return err } return nil } -// Convert_v1beta1_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta1.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in, out, s) +// Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta1.FlowCollectorConsolePlugin, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error { if err := v1.Convert_bool_To_Pointer_bool(&in.Register, &out.Register, s); err != nil { return err } @@ -496,22 +496,22 @@ func autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorCon out.ImagePullPolicy = in.ImagePullPolicy out.Resources = in.Resources out.LogLevel = in.LogLevel - if err := Convert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { return err } - if err := Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { + if err := Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { return err } - out.QuickFilters = *(*[]v1beta1.QuickFilter)(unsafe.Pointer(&in.QuickFilters)) + out.QuickFilters = *(*[]v1beta2.QuickFilter)(unsafe.Pointer(&in.QuickFilters)) return nil } -// Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta1.FlowCollectorConsolePlugin, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in, out, s) +// Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in, out, s) } -func autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta1.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error { if err := v1.Convert_Pointer_bool_To_bool(&in.Register, &out.Register, s); err != nil { return err } @@ -522,22 +522,22 @@ func autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorCon out.ImagePullPolicy = in.ImagePullPolicy out.Resources = in.Resources out.LogLevel = in.LogLevel - if err := Convert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { + if err := Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { return err } - if err := Convert_v1beta1_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { + if err := Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { return err } out.QuickFilters = *(*[]QuickFilter)(unsafe.Pointer(&in.QuickFilters)) return nil } -// Convert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta1.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in, out, s) +// Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta1.FlowCollectorEBPF, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { out.ImagePullPolicy = in.ImagePullPolicy out.Resources = in.Resources out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) @@ -548,18 +548,18 @@ func autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *Flo out.LogLevel = in.LogLevel out.Privileged = in.Privileged out.KafkaBatchSize = in.KafkaBatchSize - if err := Convert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + if err := Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta1.FlowCollectorEBPF, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in, out, s) +// Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in, out, s) } -func autoConvert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta1.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { out.ImagePullPolicy = in.ImagePullPolicy out.Resources = in.Resources out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) @@ -570,45 +570,45 @@ func autoConvert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1b out.LogLevel = in.LogLevel out.Privileged = in.Privileged out.KafkaBatchSize = in.KafkaBatchSize - if err := Convert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + if err := Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { return err } return nil } -// Convert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta1.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in, out, s) +// Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta1.FlowCollectorExporter, s conversion.Scope) error { - out.Type = v1beta1.ExporterType(in.Type) - if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { +func autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { + out.Type = v1beta2.ExporterType(in.Type) + if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta1.FlowCollectorExporter, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in, out, s) +// Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in, out, s) } -func autoConvert_v1beta1_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta1.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { out.Type = ExporterType(in.Type) - if err := Convert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + if err := Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } // WARNING: in.IPFIX requires manual conversion: does not exist in peer-type return nil } -func autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta1.FlowCollectorFLP, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { out.Port = in.Port out.HealthPort = in.HealthPort out.ProfilePort = in.ProfilePort out.ImagePullPolicy = in.ImagePullPolicy - if err := Convert_v1alpha1_FLPMetrics_To_v1beta1_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { + if err := Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { return err } out.LogLevel = in.LogLevel @@ -622,28 +622,28 @@ func autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *FlowC if err := v1.Convert_int32_To_Pointer_int32(&in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { + if err := 
Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { return err } out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize - if err := Convert_v1alpha1_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + if err := Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta1.FlowCollectorFLP, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in, out, s) +// Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in, out, s) } -func autoConvert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta1.FlowCollectorFLP, out *FlowCollectorFLP, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s conversion.Scope) error { out.Port = in.Port out.HealthPort = in.HealthPort out.ProfilePort = in.ProfilePort out.ImagePullPolicy = in.ImagePullPolicy - if err := Convert_v1beta1_FLPMetrics_To_v1alpha1_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { + if err := Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { return err } out.LogLevel = in.LogLevel @@ -657,7 +657,7 @@ func autoConvert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1bet if err := v1.Convert_Pointer_int32_To_int32(&in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { + if err := Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { return err } out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity @@ -666,13 +666,13 @@ func autoConvert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1bet // WARNING: in.ConversationHeartbeatInterval requires manual conversion: does not exist in peer-type // WARNING: in.ConversationEndTimeout requires manual conversion: does not exist in peer-type // WARNING: in.ConversationTerminatingTimeout requires manual conversion: does not exist in peer-type - if err := Convert_v1beta1_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + if err := Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { return err } return nil } -func autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta1.FlowCollectorHPA, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { out.Status = in.Status out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) out.MaxReplicas = in.MaxReplicas @@ -680,12 +680,12 
@@ func autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *FlowC return nil } -// Convert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta1.FlowCollectorHPA, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in, out, s) +// Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in, out, s) } -func autoConvert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta1.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { out.Status = in.Status out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) out.MaxReplicas = in.MaxReplicas @@ -693,84 +693,84 @@ func autoConvert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1bet return nil } -// Convert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta1.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in, out, s) +// Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta1.FlowCollectorIPFIX, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { out.CacheActiveTimeout = in.CacheActiveTimeout out.CacheMaxFlows = in.CacheMaxFlows out.Sampling = in.Sampling out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { + if err := Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { return err } - if err := Convert_v1alpha1_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { + if err := Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta1.FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in, out, s) +// Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in, out, s) } -func autoConvert_v1beta1_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta1.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { out.CacheActiveTimeout = in.CacheActiveTimeout out.CacheMaxFlows = in.CacheMaxFlows out.Sampling = in.Sampling out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { + if err := Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { return err } - if err := Convert_v1beta1_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { + if err := Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { return err } return nil } -// Convert_v1beta1_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta1.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in, out, s) +// Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta1.FlowCollectorKafka, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { out.Address = in.Address out.Topic = in.Topic - if err := Convert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { + if err := Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(&in.TLS, &out.TLS, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta1.FlowCollectorKafka, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(in, out, s) +// Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka is an autogenerated conversion function. 
+func Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in, out, s) } -func autoConvert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in *v1beta1.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in *v1beta2.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { out.Address = in.Address out.Topic = in.Topic - if err := Convert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { + if err := Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { return err } return nil } -// Convert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in *v1beta1.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in, out, s) +// Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in *v1beta2.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList(in *FlowCollectorList, out *v1beta1.FlowCollectorList, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(in *FlowCollectorList, out *v1beta2.FlowCollectorList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1beta1.FlowCollector, len(*in)) + *out = make([]v1beta2.FlowCollector, len(*in)) for i := range *in { - if err := Convert_v1alpha1_FlowCollector_To_v1beta1_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -780,18 +780,18 @@ func autoConvert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList(in *Flo return nil } -// Convert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList(in *FlowCollectorList, out *v1beta1.FlowCollectorList, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorList_To_v1beta1_FlowCollectorList(in, out, s) +// Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList is an autogenerated conversion function. 
+func Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(in *FlowCollectorList, out *v1beta2.FlowCollectorList, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(in, out, s) } -func autoConvert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1beta1.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1beta2.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]FlowCollector, len(*in)) for i := range *in { - if err := Convert_v1beta1_FlowCollector_To_v1alpha1_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -801,227 +801,198 @@ func autoConvert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1b return nil } -// Convert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1beta1.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorList_To_v1alpha1_FlowCollectorList(in, out, s) +// Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1beta2.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta1.FlowCollectorLoki, s conversion.Scope) error { - out.URL = in.URL - out.QuerierURL = in.QuerierURL - out.StatusURL = in.StatusURL - out.TenantID = in.TenantID - out.AuthToken = in.AuthToken - out.BatchWait = in.BatchWait - out.BatchSize = in.BatchSize - out.Timeout = in.Timeout - out.MinBackoff = in.MinBackoff - out.MaxBackoff = in.MaxBackoff - if err := v1.Convert_int32_To_Pointer_int32(&in.MaxRetries, &out.MaxRetries, s); err != nil { - return err - } - out.StaticLabels = *(*map[string]string)(unsafe.Pointer(&in.StaticLabels)) - if err := Convert_v1alpha1_ClientTLS_To_v1beta1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { - return err - } +func autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s conversion.Scope) error { + // WARNING: in.URL requires manual conversion: does not exist in peer-type + // WARNING: in.QuerierURL requires manual conversion: does not exist in peer-type + // WARNING: in.StatusURL requires manual conversion: does not exist in peer-type + // WARNING: in.TenantID requires manual conversion: does not exist in peer-type + // WARNING: in.AuthToken requires manual conversion: does not exist in peer-type + // WARNING: in.BatchWait requires manual conversion: does not exist in peer-type + // WARNING: in.BatchSize requires manual conversion: does not exist in peer-type + // WARNING: in.Timeout requires manual conversion: does not exist in peer-type + // WARNING: in.MinBackoff requires manual conversion: does not exist in peer-type + // WARNING: in.MaxBackoff requires manual conversion: does not exist in peer-type + // WARNING: in.MaxRetries requires manual conversion: does 
not exist in peer-type + // WARNING: in.StaticLabels requires manual conversion: does not exist in peer-type + // WARNING: in.TLS requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta1.FlowCollectorLoki, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in, out, s) -} - -func autoConvert_v1beta1_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta1.FlowCollectorLoki, out *FlowCollectorLoki, s conversion.Scope) error { - out.URL = in.URL - out.QuerierURL = in.QuerierURL - out.StatusURL = in.StatusURL - out.TenantID = in.TenantID - out.AuthToken = in.AuthToken - out.BatchWait = in.BatchWait - out.BatchSize = in.BatchSize - out.Timeout = in.Timeout - out.MinBackoff = in.MinBackoff - out.MaxBackoff = in.MaxBackoff - if err := v1.Convert_Pointer_int32_To_int32(&in.MaxRetries, &out.MaxRetries, s); err != nil { - return err - } - out.StaticLabels = *(*map[string]string)(unsafe.Pointer(&in.StaticLabels)) - if err := Convert_v1beta1_ClientTLS_To_v1alpha1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { - return err - } - // WARNING: in.StatusTLS requires manual conversion: does not exist in peer-type +func autoConvert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s conversion.Scope) error { + // WARNING: in.Mode requires manual conversion: does not exist in peer-type + // WARNING: in.Manual requires manual conversion: does not exist in peer-type return nil } -func autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta1.FlowCollectorSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s conversion.Scope) error { out.Namespace = in.Namespace - if err := Convert_v1alpha1_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(&in.Loki, &out.Loki, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { + // INFO: in.Loki opted out of conversion generation + if err := Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { return err } out.DeploymentModel = in.DeploymentModel - if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } // INFO: in.Exporters opted out of conversion generation return nil } -// Convert_v1alpha1_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta1.FlowCollectorSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in, out, s) +// Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in, out, s) } -func autoConvert_v1beta1_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta1.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { out.Namespace = in.Namespace - if err := Convert_v1beta1_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { - return err - } - if err := Convert_v1beta1_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { + if err := Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(&in.Loki, &out.Loki, s); err != nil { + if err := Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { + // INFO: in.Loki opted out of conversion generation + if err := Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { return err } out.DeploymentModel = in.DeploymentModel - if err := Convert_v1beta1_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + if err := Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } // INFO: in.Exporters opted out of conversion generation return nil } -// Convert_v1beta1_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta1.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in, out, s) +// Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta1.FlowCollectorStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) out.Namespace = in.Namespace return nil } -// Convert_v1alpha1_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta1.FlowCollectorStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(in, out, s) +// Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in, out, s) } -func autoConvert_v1beta1_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in *v1beta1.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { +func autoConvert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in *v1beta2.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) out.Namespace = in.Namespace return nil } -// Convert_v1beta1_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in *v1beta1.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in, out, s) +// Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in *v1beta2.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in, out, s) } -func autoConvert_v1alpha1_MetricsServerConfig_To_v1beta1_MetricsServerConfig(in *MetricsServerConfig, out *v1beta1.MetricsServerConfig, s conversion.Scope) error { +func autoConvert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in *MetricsServerConfig, out *v1beta2.MetricsServerConfig, s conversion.Scope) error { out.Port = in.Port - if err := Convert_v1alpha1_ServerTLS_To_v1beta1_ServerTLS(&in.TLS, &out.TLS, s); err != nil { + if err := Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(&in.TLS, &out.TLS, s); err != nil { return err } return nil } -// Convert_v1alpha1_MetricsServerConfig_To_v1beta1_MetricsServerConfig is an autogenerated conversion function. 
-func Convert_v1alpha1_MetricsServerConfig_To_v1beta1_MetricsServerConfig(in *MetricsServerConfig, out *v1beta1.MetricsServerConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_MetricsServerConfig_To_v1beta1_MetricsServerConfig(in, out, s) +// Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig is an autogenerated conversion function. +func Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in *MetricsServerConfig, out *v1beta2.MetricsServerConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in, out, s) } -func autoConvert_v1beta1_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in *v1beta1.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { +func autoConvert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in *v1beta2.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { out.Port = in.Port - if err := Convert_v1beta1_ServerTLS_To_v1alpha1_ServerTLS(&in.TLS, &out.TLS, s); err != nil { + if err := Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(&in.TLS, &out.TLS, s); err != nil { return err } return nil } -// Convert_v1beta1_MetricsServerConfig_To_v1alpha1_MetricsServerConfig is an autogenerated conversion function. -func Convert_v1beta1_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in *v1beta1.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { - return autoConvert_v1beta1_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in, out, s) +// Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig is an autogenerated conversion function. +func Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in *v1beta2.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { + return autoConvert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in, out, s) } -func autoConvert_v1alpha1_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta1.OVNKubernetesConfig, s conversion.Scope) error { +func autoConvert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta2.OVNKubernetesConfig, s conversion.Scope) error { out.Namespace = in.Namespace out.DaemonSetName = in.DaemonSetName out.ContainerName = in.ContainerName return nil } -// Convert_v1alpha1_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig is an autogenerated conversion function. -func Convert_v1alpha1_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta1.OVNKubernetesConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(in, out, s) +// Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig is an autogenerated conversion function. 
+func Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta2.OVNKubernetesConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in, out, s) } -func autoConvert_v1beta1_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in *v1beta1.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { +func autoConvert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in *v1beta2.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { out.Namespace = in.Namespace out.DaemonSetName = in.DaemonSetName out.ContainerName = in.ContainerName return nil } -// Convert_v1beta1_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig is an autogenerated conversion function. -func Convert_v1beta1_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in *v1beta1.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { - return autoConvert_v1beta1_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in, out, s) +// Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig is an autogenerated conversion function. +func Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in *v1beta2.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { + return autoConvert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in, out, s) } -func autoConvert_v1alpha1_QuickFilter_To_v1beta1_QuickFilter(in *QuickFilter, out *v1beta1.QuickFilter, s conversion.Scope) error { +func autoConvert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(in *QuickFilter, out *v1beta2.QuickFilter, s conversion.Scope) error { out.Name = in.Name out.Filter = *(*map[string]string)(unsafe.Pointer(&in.Filter)) out.Default = in.Default return nil } -// Convert_v1alpha1_QuickFilter_To_v1beta1_QuickFilter is an autogenerated conversion function. -func Convert_v1alpha1_QuickFilter_To_v1beta1_QuickFilter(in *QuickFilter, out *v1beta1.QuickFilter, s conversion.Scope) error { - return autoConvert_v1alpha1_QuickFilter_To_v1beta1_QuickFilter(in, out, s) +// Convert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter is an autogenerated conversion function. +func Convert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(in *QuickFilter, out *v1beta2.QuickFilter, s conversion.Scope) error { + return autoConvert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(in, out, s) } -func autoConvert_v1beta1_QuickFilter_To_v1alpha1_QuickFilter(in *v1beta1.QuickFilter, out *QuickFilter, s conversion.Scope) error { +func autoConvert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(in *v1beta2.QuickFilter, out *QuickFilter, s conversion.Scope) error { out.Name = in.Name out.Filter = *(*map[string]string)(unsafe.Pointer(&in.Filter)) out.Default = in.Default return nil } -// Convert_v1beta1_QuickFilter_To_v1alpha1_QuickFilter is an autogenerated conversion function. -func Convert_v1beta1_QuickFilter_To_v1alpha1_QuickFilter(in *v1beta1.QuickFilter, out *QuickFilter, s conversion.Scope) error { - return autoConvert_v1beta1_QuickFilter_To_v1alpha1_QuickFilter(in, out, s) +// Convert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter is an autogenerated conversion function. 
+func Convert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(in *v1beta2.QuickFilter, out *QuickFilter, s conversion.Scope) error { + return autoConvert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(in, out, s) } -func autoConvert_v1alpha1_ServerTLS_To_v1beta1_ServerTLS(in *ServerTLS, out *v1beta1.ServerTLS, s conversion.Scope) error { - out.Type = v1beta1.ServerTLSConfigType(in.Type) - out.Provided = (*v1beta1.CertificateReference)(unsafe.Pointer(in.Provided)) +func autoConvert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error { + out.Type = v1beta2.ServerTLSConfigType(in.Type) + out.Provided = (*v1beta2.CertificateReference)(unsafe.Pointer(in.Provided)) return nil } -// Convert_v1alpha1_ServerTLS_To_v1beta1_ServerTLS is an autogenerated conversion function. -func Convert_v1alpha1_ServerTLS_To_v1beta1_ServerTLS(in *ServerTLS, out *v1beta1.ServerTLS, s conversion.Scope) error { - return autoConvert_v1alpha1_ServerTLS_To_v1beta1_ServerTLS(in, out, s) +// Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS is an autogenerated conversion function. +func Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error { + return autoConvert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in, out, s) } -func autoConvert_v1beta1_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta1.ServerTLS, out *ServerTLS, s conversion.Scope) error { +func autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error { out.Type = ServerTLSConfigType(in.Type) out.Provided = (*CertificateReference)(unsafe.Pointer(in.Provided)) return nil } -// Convert_v1beta1_ServerTLS_To_v1alpha1_ServerTLS is an autogenerated conversion function. -func Convert_v1beta1_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta1.ServerTLS, out *ServerTLS, s conversion.Scope) error { - return autoConvert_v1beta1_ServerTLS_To_v1alpha1_ServerTLS(in, out, s) +// Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS is an autogenerated conversion function. +func Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error { + return autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in, out, s) } diff --git a/api/v1beta1/doc.go b/api/v1beta1/doc.go index d7e24aa1a..4f23c7ff0 100644 --- a/api/v1beta1/doc.go +++ b/api/v1beta1/doc.go @@ -12,4 +12,5 @@ limitations under the License. */ // Package v1beta1 contains the v1beta1 API implementation. +// +k8s:conversion-gen=github.com/netobserv/network-observability-operator/api/v1beta2 package v1beta1 diff --git a/api/v1beta1/flowcollector_types.go b/api/v1beta1/flowcollector_types.go index 6d3d2b149..937157e42 100644 --- a/api/v1beta1/flowcollector_types.go +++ b/api/v1beta1/flowcollector_types.go @@ -60,6 +60,7 @@ type FlowCollectorSpec struct { Processor FlowCollectorFLP `json:"processor,omitempty"` // loki, the flow store, client settings. + // +k8s:conversion-gen=false Loki FlowCollectorLoki `json:"loki,omitempty"` // consolePlugin defines the settings related to the OpenShift Console plugin, when available. 
@@ -753,7 +754,6 @@ type FlowCollectorStatus struct { // +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling` // +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel` // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[*].reason" -// +kubebuilder:storageversion // FlowCollector is the schema for the network flows collection API, which pilots and configures the underlying deployments. type FlowCollector struct { diff --git a/api/v1beta1/flowcollector_webhook.go b/api/v1beta1/flowcollector_webhook.go index feda7f3f2..5897d9b71 100644 --- a/api/v1beta1/flowcollector_webhook.go +++ b/api/v1beta1/flowcollector_webhook.go @@ -16,17 +16,103 @@ limitations under the License. package v1beta1 -import ctrl "sigs.k8s.io/controller-runtime" +import ( + "fmt" -// +kubebuilder:webhook:verbs=create;update,path=/validate-netobserv-io-v1beta1-flowcollector,mutating=false,failurePolicy=fail,groups=netobserv.io,resources=flowcollectors,versions=v1beta1,name=flowcollectorconversionwebhook.netobserv.io,sideEffects=None,admissionReviewVersions=v1 -func (r *FlowCollector) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). - Complete() + "github.com/netobserv/network-observability-operator/api/v1beta2" + utilconversion "github.com/netobserv/network-observability-operator/pkg/conversion" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apiconversion "k8s.io/apimachinery/pkg/conversion" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this v1beta1 FlowCollector to its v1beta2 equivalent (the conversion Hub) +// https://book.kubebuilder.io/multiversion-tutorial/conversion.html +func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v1beta2.FlowCollector) + + if err := Convert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(r, dst, nil); err != nil { + return fmt.Errorf("copying v1beta1.FlowCollector into v1beta2.FlowCollector: %w", err) + } + dst.Status.Conditions = make([]v1.Condition, len(r.Status.Conditions)) + copy(dst.Status.Conditions, r.Status.Conditions) + + // Manually restore data. 
+	restored := &v1beta2.FlowCollector{}
+	if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
+		return err
+	}
+
+	dst.Spec.Processor.LogTypes = restored.Spec.Processor.LogTypes
+
+	if restored.Spec.Processor.ConversationHeartbeatInterval != nil {
+		dst.Spec.Processor.ConversationHeartbeatInterval = restored.Spec.Processor.ConversationHeartbeatInterval
+	}
+
+	if restored.Spec.Processor.ConversationEndTimeout != nil {
+		dst.Spec.Processor.ConversationEndTimeout = restored.Spec.Processor.ConversationEndTimeout
+	}
+
+	if restored.Spec.Processor.Metrics.DisableAlerts != nil {
+		dst.Spec.Processor.Metrics.DisableAlerts = restored.Spec.Processor.Metrics.DisableAlerts
+	}
+
+	dst.Spec.Loki.Manual = restored.Spec.Loki.Manual
+
+	return nil
+}
+
+// ConvertFrom converts the hub version v1beta2 FlowCollector object to v1beta1
+func (r *FlowCollector) ConvertFrom(srcRaw conversion.Hub) error {
+	src := srcRaw.(*v1beta2.FlowCollector)
+
+	if err := Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(src, r, nil); err != nil {
+		return fmt.Errorf("copying v1beta2.FlowCollector into v1beta1.FlowCollector: %w", err)
+	}
+	r.Status.Conditions = make([]v1.Condition, len(src.Status.Conditions))
+	copy(r.Status.Conditions, src.Status.Conditions)
+
+	// Preserve Hub data on down-conversion except for metadata
+	if err := utilconversion.MarshalData(src, r); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *FlowCollectorList) ConvertTo(dstRaw conversion.Hub) error {
+	dst := dstRaw.(*v1beta2.FlowCollectorList)
+	return Convert_v1beta1_FlowCollectorList_To_v1beta2_FlowCollectorList(r, dst, nil)
+}
+
+func (r *FlowCollectorList) ConvertFrom(srcRaw conversion.Hub) error {
+	src := srcRaw.(*v1beta2.FlowCollectorList)
+	return Convert_v1beta2_FlowCollectorList_To_v1beta1_FlowCollectorList(src, r, nil)
+}
+
+// This function must be written manually: conversion-gen intentionally does not
+// generate it because v1beta2 defines new fields that do not exist in v1beta1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s apiconversion.Scope) error {
+	return autoConvert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in, out, s)
+}
+
+// This function must be written manually: conversion-gen intentionally does not
+// generate it because v1beta2 defines new fields that do not exist in v1beta1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s apiconversion.Scope) error {
+	return autoConvert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in, out, s)
 }
 
-// Hub marks this version as a conversion hub.
-// All the other version need to provide converters from/to this version.
-// https://book.kubebuilder.io/multiversion-tutorial/conversion-concepts.html
-func (*FlowCollector) Hub() {}
-func (*FlowCollectorList) Hub() {}
+// This function must be written manually: conversion-gen intentionally does not
+// generate it because v1beta2 defines new fields that do not exist in v1beta1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s apiconversion.Scope) error {
+	return autoConvert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in, out, s)
+}
+
+// This function must be written manually: conversion-gen intentionally does not
+// generate it because v1beta2 defines new fields that do not exist in v1beta1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s apiconversion.Scope) error {
+	return autoConvert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in, out, s)
+}
diff --git a/api/v1beta1/groupversion_info.go b/api/v1beta1/groupversion_info.go
index 64920b249..20d0b792d 100644
--- a/api/v1beta1/groupversion_info.go
+++ b/api/v1beta1/groupversion_info.go
@@ -32,5 +32,6 @@ var (
 	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
 
 	// AddToScheme adds the types in this group-version to the given scheme.
-	AddToScheme = SchemeBuilder.AddToScheme
+	AddToScheme        = SchemeBuilder.AddToScheme
+	localSchemeBuilder = SchemeBuilder.SchemeBuilder
 )
diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go
new file mode 100644
index 000000000..235764a3c
--- /dev/null
+++ b/api/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,1024 @@
+//go:build !ignore_autogenerated_core
+// +build !ignore_autogenerated_core
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	unsafe "unsafe"
+
+	v1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2"
+	v2 "k8s.io/api/autoscaling/v2"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*CertificateReference)(nil), (*v1beta2.CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CertificateReference_To_v1beta2_CertificateReference(a.(*CertificateReference), b.(*v1beta2.CertificateReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.CertificateReference)(nil), (*CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_CertificateReference_To_v1beta1_CertificateReference(a.(*v1beta2.CertificateReference), b.(*CertificateReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientTLS)(nil), (*v1beta2.ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(a.(*ClientTLS), b.(*v1beta2.ClientTLS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.ClientTLS)(nil), (*ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(a.(*v1beta2.ClientTLS), b.(*ClientTLS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClusterNetworkOperatorConfig)(nil), (*v1beta2.ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(a.(*ClusterNetworkOperatorConfig), b.(*v1beta2.ClusterNetworkOperatorConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.ClusterNetworkOperatorConfig)(nil), (*ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(a.(*v1beta2.ClusterNetworkOperatorConfig), b.(*ClusterNetworkOperatorConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ConsolePluginPortConfig)(nil), (*v1beta2.ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(a.(*ConsolePluginPortConfig), b.(*v1beta2.ConsolePluginPortConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.ConsolePluginPortConfig)(nil), (*ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(a.(*v1beta2.ConsolePluginPortConfig), b.(*ConsolePluginPortConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*DebugConfig)(nil), (*v1beta2.DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(a.(*DebugConfig), b.(*v1beta2.DebugConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.DebugConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(a.(*v1beta2.DebugConfig), b.(*DebugConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) 
error { + return Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollector)(nil), (*v1beta2.FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(a.(*FlowCollector), b.(*v1beta2.FlowCollector), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollector)(nil), (*FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(a.(*v1beta2.FlowCollector), b.(*FlowCollector), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorAgent)(nil), (*v1beta2.FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(a.(*FlowCollectorAgent), b.(*v1beta2.FlowCollectorAgent), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorAgent)(nil), (*FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(a.(*v1beta2.FlowCollectorAgent), b.(*FlowCollectorAgent), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorConsolePlugin)(nil), (*FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(a.(*v1beta2.FlowCollectorConsolePlugin), b.(*FlowCollectorConsolePlugin), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(a.(*v1beta2.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) + }); err != nil { + return 
err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(a.(*v1beta2.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta2.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta2.FlowCollectorIPFIX), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(a.(*v1beta2.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIXReceiver)(nil), (*v1beta2.FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(a.(*FlowCollectorIPFIXReceiver), b.(*v1beta2.FlowCollectorIPFIXReceiver), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIXReceiver)(nil), (*FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(a.(*v1beta2.FlowCollectorIPFIXReceiver), b.(*FlowCollectorIPFIXReceiver), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorKafka)(nil), (*v1beta2.FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(a.(*FlowCollectorKafka), b.(*v1beta2.FlowCollectorKafka), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorKafka)(nil), (*FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(a.(*v1beta2.FlowCollectorKafka), b.(*FlowCollectorKafka), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorList)(nil), (*v1beta2.FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorList_To_v1beta2_FlowCollectorList(a.(*FlowCollectorList), b.(*v1beta2.FlowCollectorList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorList)(nil), (*FlowCollectorList)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1beta2_FlowCollectorList_To_v1beta1_FlowCollectorList(a.(*v1beta2.FlowCollectorList), b.(*FlowCollectorList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorSpec)(nil), (*v1beta2.FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(a.(*FlowCollectorSpec), b.(*v1beta2.FlowCollectorSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorSpec)(nil), (*FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(a.(*v1beta2.FlowCollectorSpec), b.(*FlowCollectorSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FlowCollectorStatus)(nil), (*v1beta2.FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(a.(*FlowCollectorStatus), b.(*v1beta2.FlowCollectorStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorStatus)(nil), (*FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(a.(*v1beta2.FlowCollectorStatus), b.(*FlowCollectorStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*MetricsServerConfig)(nil), (*v1beta2.MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(a.(*MetricsServerConfig), b.(*v1beta2.MetricsServerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.MetricsServerConfig)(nil), (*MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig(a.(*v1beta2.MetricsServerConfig), b.(*MetricsServerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*OVNKubernetesConfig)(nil), (*v1beta2.OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(a.(*OVNKubernetesConfig), b.(*v1beta2.OVNKubernetesConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.OVNKubernetesConfig)(nil), (*OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(a.(*v1beta2.OVNKubernetesConfig), b.(*OVNKubernetesConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*QuickFilter)(nil), (*v1beta2.QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_QuickFilter_To_v1beta2_QuickFilter(a.(*QuickFilter), b.(*v1beta2.QuickFilter), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.QuickFilter)(nil), (*QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_QuickFilter_To_v1beta1_QuickFilter(a.(*v1beta2.QuickFilter), b.(*QuickFilter), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServerTLS)(nil), 
(*v1beta2.ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(a.(*ServerTLS), b.(*v1beta2.ServerTLS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(a.(*v1beta2.ServerTLS), b.(*ServerTLS), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*FlowCollectorLoki)(nil), (*v1beta2.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta2.FlowCollectorLoki), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.FLPMetrics)(nil), (*FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(a.(*v1beta2.FLPMetrics), b.(*FLPMetrics), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(a.(*v1beta2.FlowCollectorFLP), b.(*FlowCollectorFLP), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorLoki)(nil), (*FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(a.(*v1beta2.FlowCollectorLoki), b.(*FlowCollectorLoki), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_CertificateReference_To_v1beta2_CertificateReference(in *CertificateReference, out *v1beta2.CertificateReference, s conversion.Scope) error { + out.Type = v1beta2.MountableType(in.Type) + out.Name = in.Name + out.Namespace = in.Namespace + out.CertFile = in.CertFile + out.CertKey = in.CertKey + return nil +} + +// Convert_v1beta1_CertificateReference_To_v1beta2_CertificateReference is an autogenerated conversion function. +func Convert_v1beta1_CertificateReference_To_v1beta2_CertificateReference(in *CertificateReference, out *v1beta2.CertificateReference, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateReference_To_v1beta2_CertificateReference(in, out, s) +} + +func autoConvert_v1beta2_CertificateReference_To_v1beta1_CertificateReference(in *v1beta2.CertificateReference, out *CertificateReference, s conversion.Scope) error { + out.Type = MountableType(in.Type) + out.Name = in.Name + out.Namespace = in.Namespace + out.CertFile = in.CertFile + out.CertKey = in.CertKey + return nil +} + +// Convert_v1beta2_CertificateReference_To_v1beta1_CertificateReference is an autogenerated conversion function. 
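+// (Pattern used throughout this file: each autoConvert_* function carries the generated
+// field-by-field copy, and the exported Convert_* wrapper is what gets registered with the
+// scheme; only wrappers that need manual handling, such as FlowCollectorLoki's, are written by hand.)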
+func Convert_v1beta2_CertificateReference_To_v1beta1_CertificateReference(in *v1beta2.CertificateReference, out *CertificateReference, s conversion.Scope) error { + return autoConvert_v1beta2_CertificateReference_To_v1beta1_CertificateReference(in, out, s) +} + +func autoConvert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(in *ClientTLS, out *v1beta2.ClientTLS, s conversion.Scope) error { + out.Enable = in.Enable + out.InsecureSkipVerify = in.InsecureSkipVerify + if err := Convert_v1beta1_CertificateReference_To_v1beta2_CertificateReference(&in.CACert, &out.CACert, s); err != nil { + return err + } + if err := Convert_v1beta1_CertificateReference_To_v1beta2_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS is an autogenerated conversion function. +func Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(in *ClientTLS, out *v1beta2.ClientTLS, s conversion.Scope) error { + return autoConvert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(in, out, s) +} + +func autoConvert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(in *v1beta2.ClientTLS, out *ClientTLS, s conversion.Scope) error { + out.Enable = in.Enable + out.InsecureSkipVerify = in.InsecureSkipVerify + if err := Convert_v1beta2_CertificateReference_To_v1beta1_CertificateReference(&in.CACert, &out.CACert, s); err != nil { + return err + } + if err := Convert_v1beta2_CertificateReference_To_v1beta1_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_ClientTLS_To_v1beta1_ClientTLS is an autogenerated conversion function. +func Convert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(in *v1beta2.ClientTLS, out *ClientTLS, s conversion.Scope) error { + return autoConvert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(in, out, s) +} + +func autoConvert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta2.ClusterNetworkOperatorConfig, s conversion.Scope) error { + out.Namespace = in.Namespace + return nil +} + +// Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig is an autogenerated conversion function. +func Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta2.ClusterNetworkOperatorConfig, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in, out, s) +} + +func autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in *v1beta2.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { + out.Namespace = in.Namespace + return nil +} + +// Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig is an autogenerated conversion function. 
+func Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in *v1beta2.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { + return autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(in, out, s) +} + +func autoConvert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { + out.Enable = (*bool)(unsafe.Pointer(in.Enable)) + out.PortNames = *(*map[string]string)(unsafe.Pointer(&in.PortNames)) + return nil +} + +// Convert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig is an autogenerated conversion function. +func Convert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { + return autoConvert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in, out, s) +} + +func autoConvert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in *v1beta2.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { + out.Enable = (*bool)(unsafe.Pointer(in.Enable)) + out.PortNames = *(*map[string]string)(unsafe.Pointer(&in.PortNames)) + return nil +} + +// Convert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig is an autogenerated conversion function. +func Convert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in *v1beta2.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { + return autoConvert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in, out, s) +} + +func autoConvert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(in *DebugConfig, out *v1beta2.DebugConfig, s conversion.Scope) error { + out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env)) + return nil +} + +// Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig is an autogenerated conversion function. +func Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(in *DebugConfig, out *v1beta2.DebugConfig, s conversion.Scope) error { + return autoConvert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(in, out, s) +} + +func autoConvert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(in *v1beta2.DebugConfig, out *DebugConfig, s conversion.Scope) error { + out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env)) + return nil +} + +// Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig is an autogenerated conversion function. +func Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(in *v1beta2.DebugConfig, out *DebugConfig, s conversion.Scope) error { + return autoConvert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(in, out, s) +} + +func autoConvert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error { + if err := Convert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { + return err + } + out.IgnoreTags = *(*[]string)(unsafe.Pointer(&in.IgnoreTags)) + out.DisableAlerts = *(*[]v1beta2.FLPAlert)(unsafe.Pointer(&in.DisableAlerts)) + return nil +} + +// Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics is an autogenerated conversion function. 
+func Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error { + return autoConvert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in, out, s) +} + +func autoConvert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s conversion.Scope) error { + if err := Convert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { + return err + } + out.IgnoreTags = *(*[]string)(unsafe.Pointer(&in.IgnoreTags)) + out.DisableAlerts = *(*[]FLPAlert)(unsafe.Pointer(&in.DisableAlerts)) + return nil +} + +func autoConvert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollector_To_v1beta2_FlowCollector is an autogenerated conversion function. +func Convert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(in, out, s) +} + +func autoConvert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(in *v1beta2.FlowCollector, out *FlowCollector, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta2_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector is an autogenerated conversion function. +func Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(in *v1beta2.FlowCollector, out *FlowCollector, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { + out.Type = in.Type + if err := Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { + return err + } + if err := Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent is an autogenerated conversion function. 
+func Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { + out.Type = in.Type + if err := Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { + return err + } + if err := Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error { + out.Register = (*bool)(unsafe.Pointer(in.Register)) + out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) + out.Port = in.Port + out.ImagePullPolicy = in.ImagePullPolicy + out.Resources = in.Resources + out.LogLevel = in.LogLevel + if err := Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { + return err + } + if err := Convert_v1beta1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { + return err + } + out.QuickFilters = *(*[]v1beta2.QuickFilter)(unsafe.Pointer(&in.QuickFilters)) + return nil +} + +// Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error { + out.Register = (*bool)(unsafe.Pointer(in.Register)) + out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) + out.Port = in.Port + out.ImagePullPolicy = in.ImagePullPolicy + out.Resources = in.Resources + out.LogLevel = in.LogLevel + if err := Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { + return err + } + if err := Convert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { + return err + } + out.QuickFilters = *(*[]QuickFilter)(unsafe.Pointer(&in.QuickFilters)) + return nil +} + +// Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { + out.ImagePullPolicy = in.ImagePullPolicy + out.Resources = in.Resources + out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) + out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) + out.LogLevel = in.LogLevel + out.Privileged = in.Privileged + out.KafkaBatchSize = in.KafkaBatchSize + if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { + out.ImagePullPolicy = in.ImagePullPolicy + out.Resources = in.Resources + out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) + out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) + out.LogLevel = in.LogLevel + out.Privileged = in.Privileged + out.KafkaBatchSize = in.KafkaBatchSize + if err := Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { + out.Type = v1beta2.ExporterType(in.Type) + if err := Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + return err + } + if err := Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter is an autogenerated conversion function. 
+func Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { + out.Type = ExporterType(in.Type) + if err := Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + return err + } + if err := Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { + out.Port = in.Port + out.HealthPort = in.HealthPort + out.ProfilePort = in.ProfilePort + out.ImagePullPolicy = in.ImagePullPolicy + if err := Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { + return err + } + out.LogLevel = in.LogLevel + out.Resources = in.Resources + out.EnableKubeProbes = (*bool)(unsafe.Pointer(in.EnableKubeProbes)) + out.DropUnusedFields = (*bool)(unsafe.Pointer(in.DropUnusedFields)) + out.KafkaConsumerReplicas = (*int32)(unsafe.Pointer(in.KafkaConsumerReplicas)) + if err := Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { + return err + } + out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity + out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize + out.LogTypes = (*string)(unsafe.Pointer(in.LogTypes)) + out.ConversationHeartbeatInterval = (*v1.Duration)(unsafe.Pointer(in.ConversationHeartbeatInterval)) + out.ConversationEndTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationEndTimeout)) + out.ConversationTerminatingTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationTerminatingTimeout)) + if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP is an autogenerated conversion function. 
+func Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s conversion.Scope) error { + out.Port = in.Port + out.HealthPort = in.HealthPort + out.ProfilePort = in.ProfilePort + out.ImagePullPolicy = in.ImagePullPolicy + if err := Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { + return err + } + out.LogLevel = in.LogLevel + out.Resources = in.Resources + out.EnableKubeProbes = (*bool)(unsafe.Pointer(in.EnableKubeProbes)) + out.DropUnusedFields = (*bool)(unsafe.Pointer(in.DropUnusedFields)) + out.KafkaConsumerReplicas = (*int32)(unsafe.Pointer(in.KafkaConsumerReplicas)) + if err := Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { + return err + } + out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity + out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize + out.LogTypes = (*string)(unsafe.Pointer(in.LogTypes)) + out.ConversationHeartbeatInterval = (*v1.Duration)(unsafe.Pointer(in.ConversationHeartbeatInterval)) + out.ConversationEndTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationEndTimeout)) + out.ConversationTerminatingTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationTerminatingTimeout)) + if err := Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { + out.Status = in.Status + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + out.Metrics = *(*[]v2.MetricSpec)(unsafe.Pointer(&in.Metrics)) + return nil +} + +// Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { + out.Status = in.Status + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + out.Metrics = *(*[]v2.MetricSpec)(unsafe.Pointer(&in.Metrics)) + return nil +} + +// Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Sampling = in.Sampling + out.ForceSampleAll = in.ForceSampleAll + if err := Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { + return err + } + if err := Convert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Sampling = in.Sampling + out.ForceSampleAll = in.ForceSampleAll + if err := Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { + return err + } + if err := Convert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { + out.TargetHost = in.TargetHost + out.TargetPort = in.TargetPort + out.Transport = in.Transport + return nil +} + +// Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { + out.TargetHost = in.TargetHost + out.TargetPort = in.TargetPort + out.Transport = in.Transport + return nil +} + +// Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { + out.Address = in.Address + out.Topic = in.Topic + if err := Convert_v1beta1_ClientTLS_To_v1beta2_ClientTLS(&in.TLS, &out.TLS, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(in *v1beta2.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { + out.Address = in.Address + out.Topic = in.Topic + if err := Convert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(in *v1beta2.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorList_To_v1beta2_FlowCollectorList(in *FlowCollectorList, out *v1beta2.FlowCollectorList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1beta2.FlowCollector, len(*in)) + for i := range *in { + if err := Convert_v1beta1_FlowCollector_To_v1beta2_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1beta1_FlowCollectorList_To_v1beta2_FlowCollectorList is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorList_To_v1beta2_FlowCollectorList(in *FlowCollectorList, out *v1beta2.FlowCollectorList, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorList_To_v1beta2_FlowCollectorList(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorList_To_v1beta1_FlowCollectorList(in *v1beta2.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowCollector, len(*in)) + for i := range *in { + if err := Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1beta2_FlowCollectorList_To_v1beta1_FlowCollectorList is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorList_To_v1beta1_FlowCollectorList(in *v1beta2.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorList_To_v1beta1_FlowCollectorList(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s conversion.Scope) error { + // WARNING: in.URL requires manual conversion: does not exist in peer-type + // WARNING: in.QuerierURL requires manual conversion: does not exist in peer-type + // WARNING: in.StatusURL requires manual conversion: does not exist in peer-type + // WARNING: in.TenantID requires manual conversion: does not exist in peer-type + // WARNING: in.AuthToken requires manual conversion: does not exist in peer-type + // WARNING: in.BatchWait requires manual conversion: does not exist in peer-type + // WARNING: in.BatchSize requires manual conversion: does not exist in peer-type + // WARNING: in.Timeout requires manual conversion: does not exist in peer-type + // WARNING: in.MinBackoff requires manual conversion: does not exist in peer-type + // WARNING: in.MaxBackoff requires manual conversion: does not exist in peer-type + // WARNING: in.MaxRetries requires manual conversion: does not exist in peer-type + // WARNING: in.StaticLabels requires manual conversion: does not exist in peer-type + // WARNING: in.TLS requires manual conversion: does not exist in peer-type + // WARNING: in.StatusTLS requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s conversion.Scope) error { + // WARNING: in.Mode requires manual conversion: does not exist in peer-type + // WARNING: in.Manual requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s conversion.Scope) error { + out.Namespace = in.Namespace + if err := Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { + return err + } + if err := Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { + return err + } + // INFO: in.Loki opted out of conversion generation + if err := Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { + return err + } + out.DeploymentModel = in.DeploymentModel + if err := Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + return err + } + // INFO: in.Exporters opted out of conversion generation + return nil +} + +// Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec is an autogenerated conversion function. 
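+// Note: the Loki and Exporters fields are skipped in the Spec conversions below because they are
+// marked +k8s:conversion-gen=false in the API types; they must be converted by hand-written code instead.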
+func Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { + out.Namespace = in.Namespace + if err := Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { + return err + } + if err := Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { + return err + } + // INFO: in.Loki opted out of conversion generation + if err := Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { + return err + } + out.DeploymentModel = in.DeploymentModel + if err := Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { + return err + } + // INFO: in.Exporters opted out of conversion generation + return nil +} + +// Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { + out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) + out.Namespace = in.Namespace + return nil +} + +// Convert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(in *v1beta2.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { + out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) + out.Namespace = in.Namespace + return nil +} + +// Convert_v1beta2_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(in *v1beta2.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorStatus_To_v1beta1_FlowCollectorStatus(in, out, s) +} + +func autoConvert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in *MetricsServerConfig, out *v1beta2.MetricsServerConfig, s conversion.Scope) error { + out.Port = in.Port + if err := Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(&in.TLS, &out.TLS, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig is an autogenerated conversion function. 
+func Convert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in *MetricsServerConfig, out *v1beta2.MetricsServerConfig, s conversion.Scope) error { + return autoConvert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in, out, s) +} + +func autoConvert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig(in *v1beta2.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { + out.Port = in.Port + if err := Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(&in.TLS, &out.TLS, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig is an autogenerated conversion function. +func Convert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig(in *v1beta2.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { + return autoConvert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig(in, out, s) +} + +func autoConvert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta2.OVNKubernetesConfig, s conversion.Scope) error { + out.Namespace = in.Namespace + out.DaemonSetName = in.DaemonSetName + out.ContainerName = in.ContainerName + return nil +} + +// Convert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig is an autogenerated conversion function. +func Convert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta2.OVNKubernetesConfig, s conversion.Scope) error { + return autoConvert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in, out, s) +} + +func autoConvert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(in *v1beta2.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { + out.Namespace = in.Namespace + out.DaemonSetName = in.DaemonSetName + out.ContainerName = in.ContainerName + return nil +} + +// Convert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig is an autogenerated conversion function. +func Convert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(in *v1beta2.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { + return autoConvert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(in, out, s) +} + +func autoConvert_v1beta1_QuickFilter_To_v1beta2_QuickFilter(in *QuickFilter, out *v1beta2.QuickFilter, s conversion.Scope) error { + out.Name = in.Name + out.Filter = *(*map[string]string)(unsafe.Pointer(&in.Filter)) + out.Default = in.Default + return nil +} + +// Convert_v1beta1_QuickFilter_To_v1beta2_QuickFilter is an autogenerated conversion function. +func Convert_v1beta1_QuickFilter_To_v1beta2_QuickFilter(in *QuickFilter, out *v1beta2.QuickFilter, s conversion.Scope) error { + return autoConvert_v1beta1_QuickFilter_To_v1beta2_QuickFilter(in, out, s) +} + +func autoConvert_v1beta2_QuickFilter_To_v1beta1_QuickFilter(in *v1beta2.QuickFilter, out *QuickFilter, s conversion.Scope) error { + out.Name = in.Name + out.Filter = *(*map[string]string)(unsafe.Pointer(&in.Filter)) + out.Default = in.Default + return nil +} + +// Convert_v1beta2_QuickFilter_To_v1beta1_QuickFilter is an autogenerated conversion function. 
+func Convert_v1beta2_QuickFilter_To_v1beta1_QuickFilter(in *v1beta2.QuickFilter, out *QuickFilter, s conversion.Scope) error {
+	return autoConvert_v1beta2_QuickFilter_To_v1beta1_QuickFilter(in, out, s)
+}
+
+func autoConvert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error {
+	out.Type = v1beta2.ServerTLSConfigType(in.Type)
+	out.Provided = (*v1beta2.CertificateReference)(unsafe.Pointer(in.Provided))
+	return nil
+}
+
+// Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS is an autogenerated conversion function.
+func Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error {
+	return autoConvert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in, out, s)
+}
+
+func autoConvert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error {
+	out.Type = ServerTLSConfigType(in.Type)
+	out.Provided = (*CertificateReference)(unsafe.Pointer(in.Provided))
+	return nil
+}
+
+// Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS is an autogenerated conversion function.
+func Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error {
+	return autoConvert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in, out, s)
+}
diff --git a/api/v1beta2/doc.go b/api/v1beta2/doc.go
new file mode 100644
index 000000000..cfb9ccd8d
--- /dev/null
+++ b/api/v1beta2/doc.go
@@ -0,0 +1,15 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1beta2 contains the v1beta2 API implementation.
+package v1beta2
diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go
new file mode 100644
index 000000000..8c206b912
--- /dev/null
+++ b/api/v1beta2/flowcollector_types.go
@@ -0,0 +1,787 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v1beta2
+
+import (
+	ascv2 "k8s.io/api/autoscaling/v2"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+const (
+	AgentIPFIX            = "IPFIX"
+	AgentEBPF             = "EBPF"
+	DeploymentModelDirect = "DIRECT"
+	DeploymentModelKafka  = "KAFKA"
+)
+
+// Note that the FlowCollectorSpec properties MUST redefine at least one of the default
+// values to force the creation of the section when it is not provided in the manifest.
+// This ensures that the remaining default fields are set according to their definitions.
+// Otherwise, omitting the sections in the manifest would lead to zero-valued properties. +// This is a workaround for the related issue: +// https://github.com/kubernetes-sigs/controller-tools/issues/622 + +// FlowCollectorSpec defines the desired state of FlowCollector. +//
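+// For example, the `agent` section below sets +kubebuilder:default:={type:"EBPF"}, so that an
+// omitted `agent` section is still created and its nested defaults are applied (see the
+// workaround note above).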

+// *: the mention of "unsupported" or "deprecated" for a feature throughout this document means that this feature
+// is not officially supported by Red Hat. It may have been, for instance, contributed by the community
+// and accepted without a formal agreement for maintenance. The product maintainers may provide some support
+// for these features as a best effort only.
+type FlowCollectorSpec struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	// namespace where NetObserv pods are deployed.
+	// If empty, the namespace of the operator is used.
+	// +optional
+	Namespace string `json:"namespace,omitempty"`
+
+	// agent for flow extraction.
+	// +kubebuilder:default:={type:"EBPF"}
+	Agent FlowCollectorAgent `json:"agent"`
+
+	// processor defines the settings of the component that receives the flows from the agent,
+	// enriches them, and forwards them to the Loki persistence layer.
+	Processor FlowCollectorFLP `json:"processor,omitempty"`
+
+	// loki, the flow store, client settings.
+	// +k8s:conversion-gen=false
+	Loki FlowCollectorLoki `json:"loki,omitempty"`
+
+	// consolePlugin defines the settings related to the OpenShift Console plugin, when available.
+	ConsolePlugin FlowCollectorConsolePlugin `json:"consolePlugin,omitempty"`
+
+	// deploymentModel defines the desired type of deployment for flow processing. Possible values are "DIRECT" (default)
+	// to make the flow processor listen directly to the agents, or "KAFKA" to send flows to a Kafka pipeline before they
+	// are consumed by the processor.
+	// Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="DIRECT";"KAFKA"
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default:=DIRECT
+	DeploymentModel string `json:"deploymentModel"`
+
+	// kafka configuration, allowing Kafka to be used as a broker as part of the flow collection pipeline. Available when the "spec.deploymentModel" is "KAFKA".
+	// +optional
+	Kafka FlowCollectorKafka `json:"kafka,omitempty"`
+
+	// exporters define additional optional exporters for custom consumption or storage.
+	// +optional
+	// +k8s:conversion-gen=false
+	Exporters []*FlowCollectorExporter `json:"exporters"`
+}
+
+// FlowCollectorAgent is a discriminated union that allows selecting either ipfix or ebpf, but does not
+// allow defining both fields.
+// +union
+type FlowCollectorAgent struct {
+	// type selects the flow tracing agent. Possible values are "EBPF" (default) to use the NetObserv eBPF agent,
+	// or "IPFIX" - deprecated (*) - to use the legacy IPFIX collector. "EBPF" is recommended in most cases as it offers
+	// better performance and should work regardless of the CNI installed on the cluster.
+	// "IPFIX" works with the OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX,
+	// but they would require manual configuration).
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="EBPF";"IPFIX"
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default:=EBPF
+	Type string `json:"type"`
+
+	// ipfix - deprecated (*) - describes the settings related to the IPFIX-based flow reporter when the "agent.type"
+	// property is set to "IPFIX".
+	// +optional
+	IPFIX FlowCollectorIPFIX `json:"ipfix,omitempty"`
+
+	// ebpf describes the settings related to the eBPF-based flow reporter when the "agent.type"
+	// property is set to "EBPF".
+// FlowCollectorIPFIX defines a FlowCollector that uses IPFIX on OVN-Kubernetes to collect the
+// flows information
+type FlowCollectorIPFIX struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$
+	//+kubebuilder:default:="20s"
+	// cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending
+	CacheActiveTimeout string `json:"cacheActiveTimeout,omitempty" mapstructure:"cacheActiveTimeout,omitempty"`
+
+	//+kubebuilder:validation:Minimum=0
+	//+kubebuilder:default:=400
+	// cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows
+	CacheMaxFlows int32 `json:"cacheMaxFlows,omitempty" mapstructure:"cacheMaxFlows,omitempty"`
+
+	//+kubebuilder:validation:Minimum=2
+	//+kubebuilder:default:=400
+	// sampling is the sampling rate on the reporter. 100 means one flow out of 100 is sent.
+	// To ensure cluster stability, it is not possible to set a value below 2.
+	// If you really want to sample every packet, which might impact the cluster stability,
+	// refer to "forceSampleAll". Alternatively, you can use the eBPF Agent instead of IPFIX.
+	Sampling int32 `json:"sampling,omitempty" mapstructure:"sampling,omitempty"`
+
+	//+kubebuilder:default:=false
+	// forceSampleAll allows disabling sampling in the IPFIX-based flow reporter.
+	// It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability.
+	// If you REALLY want to do that, set this flag to true. Use at your own risk.
+	// When it is set to true, the value of "sampling" is ignored.
+	ForceSampleAll bool `json:"forceSampleAll,omitempty" mapstructure:"-"`
+
+	// clusterNetworkOperator defines the settings related to the OpenShift Cluster Network Operator, when available.
+	ClusterNetworkOperator ClusterNetworkOperatorConfig `json:"clusterNetworkOperator,omitempty" mapstructure:"-"`
+
+	// ovnKubernetes defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
+	OVNKubernetes OVNKubernetesConfig `json:"ovnKubernetes,omitempty" mapstructure:"-"`
+}
+
+// FlowCollectorEBPF defines a FlowCollector that uses eBPF to collect the flows information
+type FlowCollectorEBPF struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:validation:Enum=IfNotPresent;Always;Never
+	//+kubebuilder:default:=IfNotPresent
+	// imagePullPolicy is the Kubernetes pull policy for the image defined above
+	ImagePullPolicy string `json:"imagePullPolicy,omitempty"`
+
+	//+kubebuilder:default:={requests:{memory:"50Mi",cpu:"100m"},limits:{memory:"800Mi"}}
+	// resources are the compute resources required by this container.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
+
+	// sampling rate of the flow reporter. 100 means one flow out of 100 is sent. 0 or 1 means all flows are sampled.
+	//+kubebuilder:validation:Minimum=0
+	//+kubebuilder:default:=50
+	//+optional
+	Sampling *int32 `json:"sampling,omitempty"`
+
+	// cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending.
+	// Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load;
+	// however, you can expect higher memory consumption and an increased latency in the flow collection.
+	//+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$
+	//+kubebuilder:default:="5s"
+	CacheActiveTimeout string `json:"cacheActiveTimeout,omitempty"`
+
+	// cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows.
+	// Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load;
+	// however, you can expect higher memory consumption and an increased latency in the flow collection.
+	//+kubebuilder:validation:Minimum=1
+	//+kubebuilder:default:=100000
+	CacheMaxFlows int32 `json:"cacheMaxFlows,omitempty"`
+
+	// interfaces contains the interface names from which flows will be collected. If empty, the agent
+	// will fetch all the interfaces in the system, except the ones listed in ExcludeInterfaces.
+	// If an entry is enclosed by slashes (such as `/br-/`), it will match as a regular expression,
+	// otherwise it will be matched as a case-sensitive string.
+	//+optional
+	Interfaces []string `json:"interfaces"`
+
+	// excludeInterfaces contains the interface names that will be excluded from flow tracing.
+	// If an entry is enclosed by slashes (such as `/br-/`), it will match as a regular expression,
+	// otherwise it will be matched as a case-sensitive string.
+	//+kubebuilder:default=lo;
+	//+optional
+	ExcludeInterfaces []string `json:"excludeInterfaces"`
+
+	//+kubebuilder:validation:Enum=trace;debug;info;warn;error;fatal;panic
+	//+kubebuilder:default:=info
+	// logLevel defines the log level for the NetObserv eBPF Agent
+	LogLevel string `json:"logLevel,omitempty"`
+
+	// privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false:
+	// in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE)
+	// to the container, to enable its correct operation.
+	// If for some reason these capabilities cannot be set (for example, an old kernel version not knowing CAP_BPF),
+	// then you can turn on this mode for more global privileges.
+	// +optional
+	Privileged bool `json:"privileged,omitempty"`
+
+	//+kubebuilder:default:=10485760
+	// +optional
+	// kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB.
+	KafkaBatchSize int `json:"kafkaBatchSize"`
+
+	// Debug allows setting some aspects of the internal configuration of the eBPF agent.
+	// This section is intended exclusively for debugging and fine-grained performance optimizations
+	// (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk.
+	// +optional
+	Debug DebugConfig `json:"debug,omitempty"`
+}
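The slash-enclosed entries described for `interfaces` and `excludeInterfaces` follow a simple contract: `/.../` means regular expression, anything else is a case-sensitive literal. A hedged sketch of that contract, illustrative only and not the agent's actual matching code:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchesEntry treats a slash-enclosed entry as a regular expression and
// anything else as a case-sensitive literal, per the documented semantics.
func matchesEntry(entry, iface string) bool {
	if len(entry) > 1 && strings.HasPrefix(entry, "/") && strings.HasSuffix(entry, "/") {
		re, err := regexp.Compile(strings.Trim(entry, "/"))
		return err == nil && re.MatchString(iface)
	}
	return entry == iface
}

func main() {
	fmt.Println(matchesEntry("/br-/", "br-ex")) // true: regex match
	fmt.Println(matchesEntry("lo", "lo"))       // true: literal match
	fmt.Println(matchesEntry("lo", "Lo"))       // false: case-sensitive
}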
+// FlowCollectorKafka defines the desired Kafka config of FlowCollector
+type FlowCollectorKafka struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:default:=""
+	// address of the Kafka server
+	Address string `json:"address"`
+
+	//+kubebuilder:default:=""
+	// kafka topic to use. It must exist; NetObserv will not create it.
+	Topic string `json:"topic"`
+
+	// tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093.
+	// Note that, when eBPF agents are used, the Kafka certificate needs to be copied into the agent namespace (by default it's netobserv-privileged).
+	// +optional
+	TLS ClientTLS `json:"tls"`
+}
+
+// FlowCollectorIPFIXReceiver defines an external IPFIX receiver to send enriched flows to.
+type FlowCollectorIPFIXReceiver struct {
+	//+kubebuilder:default:=""
+	// address of the IPFIX external receiver
+	TargetHost string `json:"targetHost"`
+
+	// port for the IPFIX external receiver
+	TargetPort int `json:"targetPort"`
+
+	// Transport protocol (TCP or UDP) to be used for the IPFIX connection; defaults to TCP
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="TCP";"UDP"
+	// +optional
+	Transport string `json:"transport,omitempty"`
+}
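To make the Kafka fields concrete, here is a hypothetical sketch of an additional Kafka exporter built from these types; the address, topic, and secret names are invented for illustration:

package main

import (
	"fmt"

	flowsv1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2"
)

func main() {
	// Sketch: export enriched flows to an external Kafka topic, with TLS
	// pointing at the TLS listener port (9093 by convention).
	exp := &flowsv1beta2.FlowCollectorExporter{
		Type: flowsv1beta2.KafkaExporter,
		Kafka: flowsv1beta2.FlowCollectorKafka{
			Address: "kafka-cluster-kafka-bootstrap.netobserv:9093", // hypothetical
			Topic:   "netobserv-flows-export",                      // hypothetical
			TLS: flowsv1beta2.ClientTLS{
				Enable: true,
				CACert: flowsv1beta2.CertificateReference{
					Type:     flowsv1beta2.RefTypeSecret,
					Name:     "kafka-cluster-ca-cert", // hypothetical
					CertFile: "ca.crt",
				},
			},
		},
	}
	fmt.Println(exp.Kafka.Address)
}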
+const (
+	ServerTLSDisabled = "DISABLED"
+	ServerTLSProvided = "PROVIDED"
+	ServerTLSAuto     = "AUTO"
+)
+
+type ServerTLSConfigType string
+
+// ServerTLS defines the TLS configuration, server side
+type ServerTLS struct {
+	// Select the type of TLS configuration:
+	// "DISABLED" (default) to not configure TLS for the endpoint, "PROVIDED" to manually provide a cert file and a key file,
+	// and "AUTO" to use an OpenShift auto-generated certificate using annotations
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="DISABLED";"PROVIDED";"AUTO"
+	// +kubebuilder:validation:Required
+	//+kubebuilder:default:="DISABLED"
+	Type ServerTLSConfigType `json:"type,omitempty"`
+
+	// TLS configuration.
+	// +optional
+	Provided *CertificateReference `json:"provided"`
+}
+
+// MetricsServerConfig defines the metrics server endpoint configuration for the Prometheus scraper
+type MetricsServerConfig struct {
+
+	//+kubebuilder:validation:Minimum=1
+	//+kubebuilder:validation:Maximum=65535
+	//+kubebuilder:default:=9102
+	// the Prometheus HTTP port
+	Port int32 `json:"port,omitempty"`
+
+	// TLS configuration.
+	// +optional
+	TLS ServerTLS `json:"tls"`
+}
+
+const (
+	AlertNoFlows   = "NetObservNoFlows"
+	AlertLokiError = "NetObservLokiError"
+)
+
+// Name of a processor alert.
+// Possible values are:
+// `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
+// `NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors.
+// +kubebuilder:validation:Enum:="NetObservNoFlows";"NetObservLokiError"
+type FLPAlert string
+
+// FLPMetrics defines the desired FLP configuration regarding metrics
+type FLPMetrics struct {
+	// metricsServer endpoint configuration for the Prometheus scraper
+	// +optional
+	Server MetricsServerConfig `json:"server,omitempty"`
+
+	// ignoreTags is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions .
+	// Available tags are: egress, ingress, flows, bytes, packets, namespaces, nodes, workloads
+	//+kubebuilder:default:={"egress","packets"}
+	// +optional
+	IgnoreTags []string `json:"ignoreTags"`
+
+	// disableAlerts is a list of alerts that should be disabled.
+	// Possible values are:
+	// `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
+	// `NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors.
+	// +optional
+	DisableAlerts []FLPAlert `json:"disableAlerts"`
+}
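As an example of these two knobs, a hedged sketch of tuning FLPMetrics; the tag and alert names are taken from the constants above:

package main

import (
	"fmt"

	flowsv1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2"
)

func main() {
	// Sketch: skip per-namespace metrics on top of the defaults, and
	// silence the no-flows alert while keeping the Loki error alert.
	m := flowsv1beta2.FLPMetrics{
		IgnoreTags:    []string{"egress", "packets", "namespaces"},
		DisableAlerts: []flowsv1beta2.FLPAlert{flowsv1beta2.AlertNoFlows},
	}
	fmt.Println(m.IgnoreTags, m.DisableAlerts)
}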
+const (
+	LogTypeFlows              = "FLOWS"
+	LogTypeConversations      = "CONVERSATIONS"
+	LogTypeEndedConversations = "ENDED_CONVERSATIONS"
+	LogTypeAll                = "ALL"
+)
+
+// FlowCollectorFLP defines the desired flowlogs-pipeline state of FlowCollector
+type FlowCollectorFLP struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:validation:Minimum=1025
+	//+kubebuilder:validation:Maximum=65535
+	//+kubebuilder:default:=2055
+	// port of the flow collector (host port).
+	// By convention, some values are not authorized: the port must not be below 1024 and must not equal these values:
+	// 4789, 6081, 500, and 4500
+	Port int32 `json:"port,omitempty"`
+
+	//+kubebuilder:validation:Minimum=1
+	//+kubebuilder:validation:Maximum=65535
+	//+kubebuilder:default:=8080
+	// healthPort is a collector HTTP port in the Pod that exposes the health check API
+	HealthPort int32 `json:"healthPort,omitempty"`
+
+	//+kubebuilder:validation:Minimum=0
+	//+kubebuilder:validation:Maximum=65535
+	//+optional
+	// profilePort allows setting up a Go pprof profiler listening on this port
+	ProfilePort int32 `json:"profilePort,omitempty"`
+
+	//+kubebuilder:validation:Enum=IfNotPresent;Always;Never
+	//+kubebuilder:default:=IfNotPresent
+	// imagePullPolicy is the Kubernetes pull policy for the image defined above
+	ImagePullPolicy string `json:"imagePullPolicy,omitempty"`
+
+	// Metrics defines the processor configuration regarding metrics
+	Metrics FLPMetrics `json:"metrics,omitempty"`
+
+	//+kubebuilder:validation:Enum=trace;debug;info;warn;error;fatal;panic
+	//+kubebuilder:default:=info
+	// logLevel of the collector runtime
+	LogLevel string `json:"logLevel,omitempty"`
+
+	//+kubebuilder:default:={requests:{memory:"100Mi",cpu:"100m"},limits:{memory:"800Mi"}}
+	// resources are the compute resources required by this container.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
+
+	//+kubebuilder:default:=true
+	// enableKubeProbes is a flag to enable or disable Kubernetes liveness and readiness probes
+	EnableKubeProbes *bool `json:"enableKubeProbes,omitempty"`
+
+	//+kubebuilder:default:=true
+	// dropUnusedFields, when set to true, drops the fields that are known to be unused by OVS, in order to save storage space.
+	DropUnusedFields *bool `json:"dropUnusedFields,omitempty"`
+
+	//+kubebuilder:validation:Minimum=0
+	//+kubebuilder:default:=3
+	// kafkaConsumerReplicas defines the number of replicas (pods) to start for flowlogs-pipeline-transformer, which consumes Kafka messages.
+	// This setting is ignored when Kafka is disabled.
+	KafkaConsumerReplicas *int32 `json:"kafkaConsumerReplicas,omitempty"`
+
+	// kafkaConsumerAutoscaler spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages.
+	// This setting is ignored when Kafka is disabled.
+	// +optional
+	KafkaConsumerAutoscaler FlowCollectorHPA `json:"kafkaConsumerAutoscaler,omitempty"`
+
+	//+kubebuilder:default:=1000
+	// +optional
+	// kafkaConsumerQueueCapacity defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka.
+	KafkaConsumerQueueCapacity int `json:"kafkaConsumerQueueCapacity"`
+
+	//+kubebuilder:default:=10485760
+	// +optional
+	// kafkaConsumerBatchSize indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB.
+	KafkaConsumerBatchSize int `json:"kafkaConsumerBatchSize"`
+
+	// logTypes defines the desired record types to generate. Possible values are "FLOWS" (default) to export
+	// flow logs, "CONVERSATIONS" to generate newConnection, heartbeat and endConnection events, "ENDED_CONVERSATIONS" to generate
+	// only endConnection events, or "ALL" to generate both flow logs and conversation events
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:validation:Enum:="FLOWS";"CONVERSATIONS";"ENDED_CONVERSATIONS";"ALL"
+	// +kubebuilder:default:=FLOWS
+	LogTypes *string `json:"logTypes,omitempty"`
+
+	//+kubebuilder:default:="30s"
+	// +optional
+	// conversationHeartbeatInterval is the time to wait between heartbeat reports of a conversation
+	ConversationHeartbeatInterval *metav1.Duration `json:"conversationHeartbeatInterval,omitempty"`
+
+	//+kubebuilder:default:="10s"
+	// +optional
+	// conversationEndTimeout is the time to wait from the last flow log to end a conversation
+	ConversationEndTimeout *metav1.Duration `json:"conversationEndTimeout,omitempty"`
+
+	//+kubebuilder:default:="5s"
+	// +optional
+	// conversationTerminatingTimeout is the time to wait from a detected FIN flag to end a connection
+	ConversationTerminatingTimeout *metav1.Duration `json:"conversationTerminatingTimeout,omitempty"`
+
+	// Debug allows setting some aspects of the internal configuration of the flow processor.
+	// This section is intended exclusively for debugging and fine-grained performance optimizations
+	// (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk.
+	// +optional
+	Debug DebugConfig `json:"debug,omitempty"`
+}
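The port constraint documented above reads as a small validation rule. A sketch of that rule as stated, not the operator's actual validation code:

package main

import "fmt"

// validateFLPPort illustrates the documented constraint on the processor
// port: it must lie in [1025, 65535] and must not collide with the
// reserved values 4789, 6081, 500 and 4500 (500 is already excluded by
// the range check; it is repeated here to mirror the comment verbatim).
func validateFLPPort(port int32) error {
	if port < 1025 || port > 65535 {
		return fmt.Errorf("port %d out of range [1025, 65535]", port)
	}
	switch port {
	case 4789, 6081, 500, 4500:
		return fmt.Errorf("port %d is reserved", port)
	}
	return nil
}

func main() {
	fmt.Println(validateFLPPort(2055)) // <nil>: the default is valid
	fmt.Println(validateFLPPort(4789)) // error: reserved
}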
+const (
+	HPAStatusDisabled = "DISABLED"
+	HPAStatusEnabled  = "ENABLED"
+)
+
+type FlowCollectorHPA struct {
+	// +kubebuilder:validation:Enum:=DISABLED;ENABLED
+	// +kubebuilder:default:=DISABLED
+	// Status describes the desired status regarding deploying a horizontal pod autoscaler.
+	// DISABLED will not deploy a horizontal pod autoscaler.
+	// ENABLED will deploy a horizontal pod autoscaler.
+	Status string `json:"status,omitempty"`
+
+	// minReplicas is the lower limit for the number of replicas to which the autoscaler
+	// can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
+	// alpha feature gate HPAScaleToZero is enabled and at least one Object or External
+	// metric is configured. Scaling is active as long as at least one metric value is
+	// available.
+	// +optional
+	MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
+	// maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+	// +kubebuilder:default:=3
+	// +optional
+	MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"`
+	// metrics used by the pod autoscaler
+	// +optional
+	Metrics []ascv2.MetricSpec `json:"metrics"`
+}
+
+const (
+	LokiAuthDisabled         = "DISABLED"
+	LokiAuthUseHostToken     = "HOST"
+	LokiAuthForwardUserToken = "FORWARD"
+)
+
+type LokiManualParams struct {
+	//+kubebuilder:default:="http://loki:3100/"
+	// url is the address of an existing Loki service to push the flows to. When using the Loki Operator,
+	// set it to the Loki gateway service with the `network` tenant set in the path, for example
+	// https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.
+	URL string `json:"url,omitempty"`
+
+	//+kubebuilder:validation:optional
+	// querierURL specifies the address of the Loki querier service, in case it is different from the
+	// Loki ingester URL. If empty, the URL value will be used (assuming that the Loki ingester
+	// and querier are on the same server). When using the Loki Operator, do not set it, since
+	// ingestion and queries use the Loki gateway.
+	QuerierURL string `json:"querierUrl,omitempty"`
+
+	//+kubebuilder:validation:optional
+	// statusURL specifies the address of the Loki /ready, /metrics and /config endpoints, in case it is different from the
+	// Loki querier URL. If empty, the QuerierURL value will be used.
+	// This is useful to show error messages and some context in the frontend.
+	// When using the Loki Operator, set it to the Loki HTTP query frontend service, for example
+	// https://loki-query-frontend-http.netobserv.svc:3100/.
+	// The statusTLS configuration will be used when statusUrl is set.
+	StatusURL string `json:"statusUrl,omitempty"`
+
+	//+kubebuilder:default:="netobserv"
+	// tenantID is the Loki X-Scope-OrgID that identifies the tenant for each request.
+	// When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode.
+	TenantID string `json:"tenantID,omitempty"`
+
+	// +kubebuilder:validation:Enum:="DISABLED";"HOST";"FORWARD"
+	//+kubebuilder:default:="DISABLED"
+	// AuthToken describes the way to get a token to authenticate to Loki.
+	// DISABLED will not send any token with the request.
+	// HOST - deprecated (*) - will use the local pod service account to authenticate to Loki.
+	// FORWARD will forward the user token for authorization.
+	// When using the Loki Operator, this should be set to `FORWARD`.
+	AuthToken string `json:"authToken,omitempty"`
+
+	//+kubebuilder:default:="1s"
+	// batchWait is the maximum time to wait before sending a batch.
+	BatchWait metav1.Duration `json:"batchWait,omitempty"`
+
+	//+kubebuilder:validation:Minimum=1
+	//+kubebuilder:default:=102400
+	// batchSize is the maximum batch size (in bytes) of logs to accumulate before sending.
+	BatchSize int64 `json:"batchSize,omitempty"`
+
+	//+kubebuilder:default:="10s"
+	// timeout is the maximum connection/request time limit.
+	// A timeout of zero means no timeout.
+	Timeout metav1.Duration `json:"timeout,omitempty"`
+
+	//+kubebuilder:default:="1s"
+	// minBackoff is the initial backoff time for client connections between retries.
+	MinBackoff metav1.Duration `json:"minBackoff,omitempty"`
+
+	//+kubebuilder:default:="5s"
+	// maxBackoff is the maximum backoff time for client connections between retries.
+	MaxBackoff metav1.Duration `json:"maxBackoff,omitempty"`
+
+	//+kubebuilder:validation:Minimum=0
+	//+kubebuilder:default:=2
+	// maxRetries is the maximum number of retries for client connections.
+	MaxRetries *int32 `json:"maxRetries,omitempty"`
+
+	//+kubebuilder:default:={"app":"netobserv-flowcollector"}
+	// +optional
+	// staticLabels is a map of common labels to set on each flow.
+	StaticLabels map[string]string `json:"staticLabels"`
+
+	// tls client configuration for the Loki URL.
+	// +optional
+	TLS ClientTLS `json:"tls"`
+
+	// tls client configuration for the Loki status URL.
+	// +optional
+	StatusTLS ClientTLS `json:"statusTls"`
+}
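To see how the backoff knobs interact, a hedged sketch of one plausible retry schedule derived from minBackoff, maxBackoff and maxRetries; the real Loki client may schedule retries differently:

package main

import (
	"fmt"
	"time"
)

// backoffSchedule grows the delay exponentially from min, capped at max,
// for the given number of retries. A sketch of the documented knobs, not
// the actual client code.
func backoffSchedule(min, max time.Duration, retries int32) []time.Duration {
	var out []time.Duration
	d := min
	for i := int32(0); i < retries; i++ {
		out = append(out, d)
		d *= 2
		if d > max {
			d = max
		}
	}
	return out
}

func main() {
	// Defaults from the spec: minBackoff=1s, maxBackoff=5s, maxRetries=2.
	fmt.Println(backoffSchedule(time.Second, 5*time.Second, 2)) // [1s 2s]
}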
+// FlowCollectorLoki defines the desired state for FlowCollector's Loki client.
+type FlowCollectorLoki struct {
+	// mode defines how the Loki client is configured. Currently, the only supported value is "manual".
+	//+kubebuilder:validation:Enum=manual
+	Mode string `json:"mode,omitempty"`
+
+	// manual contains the static Loki client configuration, used when mode is "manual".
+	Manual LokiManualParams `json:"manual,omitempty"`
+}
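This mode/manual split is the heart of the new Loki layout: fields that previously sat at the top level of the loki section are now nested under `manual`, selected by `mode`. A minimal sketch of producing the new shape; the actual conversion from older versions is handled by the generated conversion functions:

package main

import (
	"fmt"

	flowsv1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2"
)

// wrapLoki nests a flat URL/tenant pair under the new manual section.
// Only fields defined in this patch are used.
func wrapLoki(url, tenantID string) flowsv1beta2.FlowCollectorLoki {
	return flowsv1beta2.FlowCollectorLoki{
		Mode: "manual",
		Manual: flowsv1beta2.LokiManualParams{
			URL:      url,
			TenantID: tenantID,
		},
	}
}

func main() {
	l := wrapLoki("http://loki:3100/", "netobserv")
	fmt.Println(l.Mode, l.Manual.URL)
}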
+// FlowCollectorConsolePlugin defines the desired ConsolePlugin state of FlowCollector
+type FlowCollectorConsolePlugin struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:default:=true
+	// register, when set to true, automatically registers the provided console plugin with the OpenShift Console operator.
+	// When set to false, you can still register it manually by editing console.operator.openshift.io/cluster.
+	// For example: oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]'
+	Register *bool `json:"register,omitempty"`
+
+	//+kubebuilder:validation:Minimum=0
+	//+kubebuilder:default:=1
+	// replicas defines the number of replicas (pods) to start.
+	Replicas *int32 `json:"replicas,omitempty"`
+
+	//+kubebuilder:validation:Minimum=1
+	//+kubebuilder:validation:Maximum=65535
+	//+kubebuilder:default:=9001
+	// port is the plugin service port. Do not use 9002, which is reserved for metrics.
+	Port int32 `json:"port,omitempty"`
+
+	//+kubebuilder:validation:Enum=IfNotPresent;Always;Never
+	//+kubebuilder:default:=IfNotPresent
+	// imagePullPolicy is the Kubernetes pull policy for the image defined above
+	ImagePullPolicy string `json:"imagePullPolicy,omitempty"`
+
+	//+kubebuilder:default:={requests:{memory:"50Mi",cpu:"100m"},limits:{memory:"100Mi"}}
+	// resources are the compute resources required by this container.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
+
+	//+kubebuilder:validation:Enum=trace;debug;info;warn;error;fatal;panic
+	//+kubebuilder:default:=info
+	// logLevel for the console plugin backend
+	LogLevel string `json:"logLevel,omitempty"`
+
+	// autoscaler spec of a horizontal pod autoscaler to set up for the plugin Deployment.
+	// +optional
+	Autoscaler FlowCollectorHPA `json:"autoscaler,omitempty"`
+
+	//+kubebuilder:default:={enable:true}
+	// portNaming defines the configuration of the port-to-service name translation
+	PortNaming ConsolePluginPortConfig `json:"portNaming,omitempty"`
+
+	//+kubebuilder:default:={{name:"Applications",filter:{"src_namespace!":"openshift-,netobserv","dst_namespace!":"openshift-,netobserv"},default:true},{name:"Infrastructure",filter:{"src_namespace":"openshift-,netobserv","dst_namespace":"openshift-,netobserv"}},{name:"Pods network",filter:{"src_kind":"Pod","dst_kind":"Pod"},default:true},{name:"Services network",filter:{"dst_kind":"Service"}}}
+	// +optional
+	// quickFilters configures quick filter presets for the Console plugin
+	QuickFilters []QuickFilter `json:"quickFilters"`
+}
+
+// ConsolePluginPortConfig defines the configuration of the port-to-service name translation feature of the console plugin
+type ConsolePluginPortConfig struct {
+	//+kubebuilder:default:=true
+	// enable the console plugin port-to-service name translation
+	Enable *bool `json:"enable,omitempty"`
+
+	// portNames defines additional port names to use in the console.
+	// Example: portNames: {"3100": "loki"}
+	// +optional
+	PortNames map[string]string `json:"portNames" yaml:"portNames"`
+}
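The portNames map described above can be thought of as a simple lookup with a numeric fallback. An illustrative sketch, not the plugin's actual code:

package main

import (
	"fmt"
	"strconv"
)

// serviceNameFor returns the custom name mapped to a port, falling back
// to the numeric port when no mapping exists, mirroring the documented
// translation behavior.
func serviceNameFor(portNames map[string]string, port int) string {
	key := strconv.Itoa(port)
	if name, ok := portNames[key]; ok {
		return name
	}
	return key
}

func main() {
	names := map[string]string{"3100": "loki"}
	fmt.Println(serviceNameFor(names, 3100)) // loki
	fmt.Println(serviceNameFor(names, 8080)) // 8080
}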
+// QuickFilter defines preset configuration for Console's quick filters
+type QuickFilter struct {
+	// name of the filter, as it will be displayed in the Console
+	// +kubebuilder:MinLength:=1
+	Name string `json:"name"`
+	// filter is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a comma-separated string.
+	// Example: filter: {"src_namespace": "namespace1,namespace2"}
+	// +kubebuilder:MinProperties:=1
+	Filter map[string]string `json:"filter"`
+	// default defines whether this filter should be active by default or not
+	// +optional
+	Default bool `json:"default,omitempty"`
+}
+
+// ClusterNetworkOperatorConfig defines the desired configuration related to the Cluster Network Operator
+type ClusterNetworkOperatorConfig struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:default:=openshift-network-operator
+	// namespace where the config map is going to be deployed.
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// OVNKubernetesConfig defines the desired configuration related to the OVN-Kubernetes network provider, when Cluster Network Operator isn't installed.
+type OVNKubernetesConfig struct {
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	//+kubebuilder:default:=ovn-kubernetes
+	// namespace where OVN-Kubernetes pods are deployed.
+	Namespace string `json:"namespace,omitempty"`
+
+	//+kubebuilder:default:=ovnkube-node
+	// daemonSetName defines the name of the DaemonSet controlling the OVN-Kubernetes pods.
+	DaemonSetName string `json:"daemonSetName,omitempty"`
+
+	//+kubebuilder:default:=ovnkube-node
+	// containerName defines the name of the container to configure for IPFIX.
+	ContainerName string `json:"containerName,omitempty"`
+}
+
+type MountableType string
+
+const (
+	RefTypeSecret    MountableType = "secret"
+	RefTypeConfigMap MountableType = "configmap"
+)
+
+type CertificateReference struct {
+	//+kubebuilder:validation:Enum=configmap;secret
+	// type for the certificate reference: "configmap" or "secret"
+	Type MountableType `json:"type,omitempty"`
+
+	// name of the config map or secret containing certificates
+	Name string `json:"name,omitempty"`
+
+	// namespace of the config map or secret containing certificates. If omitted, the namespace where NetObserv is deployed is assumed.
+	// If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+	// +optional
+	//+kubebuilder:default:=""
+	Namespace string `json:"namespace,omitempty"`
+
+	// certFile defines the path to the certificate file name within the config map or secret
+	CertFile string `json:"certFile,omitempty"`
+
+	// certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+	// +optional
+	CertKey string `json:"certKey,omitempty"`
+}
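As a rough illustration of how a "secret"-typed CertificateReference could surface in a pod, a hedged sketch building a volume for it; the volume and secret names are invented, the configmap branch is omitted, and this is not the operator's actual builder (see pkg/volumes/builder.go in this patch for that):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// volumeFor mounts the named secret as a pod volume; per the comment
// above, a cross-namespace reference would first be copied into the
// NetObserv namespace so it can be mounted at all.
func volumeFor(secretName string) corev1.Volume {
	return corev1.Volume{
		Name: "kafka-ca", // hypothetical volume name
		VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{SecretName: secretName},
		},
	}
}

func main() {
	v := volumeFor("kafka-cluster-ca-cert") // hypothetical secret name
	fmt.Println(v.Name, v.Secret.SecretName)
}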
+// ClientTLS defines TLS client configuration
+type ClientTLS struct {
+	//+kubebuilder:default:=false
+	// enable TLS
+	Enable bool `json:"enable,omitempty"`
+
+	//+kubebuilder:default:=false
+	// insecureSkipVerify allows skipping client-side verification of the server certificate.
+	// If set to true, the CACert field will be ignored.
+	InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"`
+
+	// caCert defines the reference to the certificate of the Certificate Authority
+	CACert CertificateReference `json:"caCert,omitempty"`
+
+	// userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
+	// +optional
+	UserCert CertificateReference `json:"userCert,omitempty"`
+}
+
+// DebugConfig allows tweaking some aspects of the internal configuration of the agent and FLP.
+// They are intended exclusively for debugging. Users setting these values do it at their own risk.
+type DebugConfig struct {
+	// env allows passing custom environment variables to the NetObserv Agent. Useful for passing
+	// some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be
+	// publicly exposed as part of the FlowCollector descriptor, as they are only useful
+	// in edge debug and support scenarios.
+	//+optional
+	Env map[string]string `json:"env,omitempty"`
+}
+
+// Add more exporter types below
+type ExporterType string
+
+const (
+	KafkaExporter ExporterType = "KAFKA"
+	IpfixExporter ExporterType = "IPFIX"
+)
+
+// FlowCollectorExporter defines an additional exporter to send enriched flows to.
+type FlowCollectorExporter struct {
+	// type selects the type of exporter. The available options are "KAFKA" and "IPFIX". "IPFIX" is unsupported (*).
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="KAFKA";"IPFIX"
+	// +kubebuilder:validation:Required
+	Type ExporterType `json:"type"`
+
+	// kafka configuration, such as the address and topic, to send enriched flows to.
+	// +optional
+	Kafka FlowCollectorKafka `json:"kafka,omitempty"`
+
+	// IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. Unsupported (*).
+	// +optional
+	IPFIX FlowCollectorIPFIXReceiver `json:"ipfix,omitempty"`
+}
+
+// FlowCollectorStatus defines the observed state of FlowCollector
+type FlowCollectorStatus struct {
+	// Important: Run "make" to regenerate code after modifying this file
+
+	// conditions represent the latest available observations of an object's state
+	Conditions []metav1.Condition `json:"conditions"`
+
+	// namespace where console plugin and flowlogs-pipeline have been deployed.
+	Namespace string `json:"namespace,omitempty"`
+}
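Controllers typically manage such conditions with the apimachinery helpers. A short sketch of recording a condition in this status shape; the condition type and reason are illustrative, not taken from this patch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// SetStatusCondition inserts or updates the condition in place and
	// manages LastTransitionTime, which is what the "Status" printer
	// column above surfaces via .status.conditions[*].reason.
	var conditions []metav1.Condition
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:    "Ready",               // illustrative
		Status:  metav1.ConditionTrue,
		Reason:  "ReconcileSucceeded",  // illustrative
		Message: "all components deployed",
	})
	fmt.Println(conditions[0].Type, conditions[0].Reason)
}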
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:scope=Cluster
+// +kubebuilder:printcolumn:name="Agent",type="string",JSONPath=`.spec.agent.type`
+// +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling`
+// +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel`
+// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[*].reason"
+// +kubebuilder:storageversion
+
+// FlowCollector is the schema for the network flows collection API, which pilots and configures the underlying deployments.
+type FlowCollector struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   FlowCollectorSpec   `json:"spec,omitempty"`
+	Status FlowCollectorStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// FlowCollectorList contains a list of FlowCollector
+type FlowCollectorList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []FlowCollector `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&FlowCollector{}, &FlowCollectorList{})
+}
diff --git a/api/v1beta2/flowcollector_webhook.go b/api/v1beta2/flowcollector_webhook.go
new file mode 100644
index 000000000..c3e3a71e8
--- /dev/null
+++ b/api/v1beta2/flowcollector_webhook.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2021.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import ctrl "sigs.k8s.io/controller-runtime"
+
+// +kubebuilder:webhook:verbs=create;update,path=/validate-netobserv-io-v1beta2-flowcollector,mutating=false,failurePolicy=fail,groups=netobserv.io,resources=flowcollectors,versions=v1beta2,name=flowcollectorconversionwebhook.netobserv.io,sideEffects=None,admissionReviewVersions=v1
+func (r *FlowCollector) SetupWebhookWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewWebhookManagedBy(mgr).
+		For(r).
+		Complete()
+}
+
+// Hub marks this version as a conversion hub.
+// All the other versions need to provide converters from/to this version.
+// https://book.kubebuilder.io/multiversion-tutorial/conversion-concepts.html
+func (*FlowCollector) Hub()     {}
+func (*FlowCollectorList) Hub() {}
diff --git a/api/v1beta2/groupversion_info.go b/api/v1beta2/groupversion_info.go
new file mode 100644
index 000000000..9fa9dec5d
--- /dev/null
+++ b/api/v1beta2/groupversion_info.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2021.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1beta2 contains API Schema definitions for the flows v1beta2 API group
+// +kubebuilder:object:generate=true
+// +groupName=flows.netobserv.io
+package v1beta2
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+	// GroupVersion is group version used to register these objects
+	GroupVersion = schema.GroupVersion{Group: "flows.netobserv.io", Version: "v1beta2"}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..b31f04506 --- /dev/null +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,614 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "k8s.io/api/autoscaling/v2" + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateReference) DeepCopyInto(out *CertificateReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateReference. +func (in *CertificateReference) DeepCopy() *CertificateReference { + if in == nil { + return nil + } + out := new(CertificateReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientTLS) DeepCopyInto(out *ClientTLS) { + *out = *in + out.CACert = in.CACert + out.UserCert = in.UserCert +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientTLS. +func (in *ClientTLS) DeepCopy() *ClientTLS { + if in == nil { + return nil + } + out := new(ClientTLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkOperatorConfig) DeepCopyInto(out *ClusterNetworkOperatorConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkOperatorConfig. +func (in *ClusterNetworkOperatorConfig) DeepCopy() *ClusterNetworkOperatorConfig { + if in == nil { + return nil + } + out := new(ClusterNetworkOperatorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginPortConfig) DeepCopyInto(out *ConsolePluginPortConfig) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.PortNames != nil { + in, out := &in.PortNames, &out.PortNames + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginPortConfig. +func (in *ConsolePluginPortConfig) DeepCopy() *ConsolePluginPortConfig { + if in == nil { + return nil + } + out := new(ConsolePluginPortConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DebugConfig) DeepCopyInto(out *DebugConfig) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebugConfig. +func (in *DebugConfig) DeepCopy() *DebugConfig { + if in == nil { + return nil + } + out := new(DebugConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FLPMetrics) DeepCopyInto(out *FLPMetrics) { + *out = *in + in.Server.DeepCopyInto(&out.Server) + if in.IgnoreTags != nil { + in, out := &in.IgnoreTags, &out.IgnoreTags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DisableAlerts != nil { + in, out := &in.DisableAlerts, &out.DisableAlerts + *out = make([]FLPAlert, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FLPMetrics. +func (in *FLPMetrics) DeepCopy() *FLPMetrics { + if in == nil { + return nil + } + out := new(FLPMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollector) DeepCopyInto(out *FlowCollector) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollector. +func (in *FlowCollector) DeepCopy() *FlowCollector { + if in == nil { + return nil + } + out := new(FlowCollector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowCollector) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorAgent) DeepCopyInto(out *FlowCollectorAgent) { + *out = *in + out.IPFIX = in.IPFIX + in.EBPF.DeepCopyInto(&out.EBPF) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorAgent. +func (in *FlowCollectorAgent) DeepCopy() *FlowCollectorAgent { + if in == nil { + return nil + } + out := new(FlowCollectorAgent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorConsolePlugin) DeepCopyInto(out *FlowCollectorConsolePlugin) { + *out = *in + if in.Register != nil { + in, out := &in.Register, &out.Register + *out = new(bool) + **out = **in + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Resources.DeepCopyInto(&out.Resources) + in.Autoscaler.DeepCopyInto(&out.Autoscaler) + in.PortNaming.DeepCopyInto(&out.PortNaming) + if in.QuickFilters != nil { + in, out := &in.QuickFilters, &out.QuickFilters + *out = make([]QuickFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorConsolePlugin. 
+func (in *FlowCollectorConsolePlugin) DeepCopy() *FlowCollectorConsolePlugin { + if in == nil { + return nil + } + out := new(FlowCollectorConsolePlugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorEBPF) DeepCopyInto(out *FlowCollectorEBPF) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + if in.Sampling != nil { + in, out := &in.Sampling, &out.Sampling + *out = new(int32) + **out = **in + } + if in.Interfaces != nil { + in, out := &in.Interfaces, &out.Interfaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludeInterfaces != nil { + in, out := &in.ExcludeInterfaces, &out.ExcludeInterfaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Debug.DeepCopyInto(&out.Debug) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorEBPF. +func (in *FlowCollectorEBPF) DeepCopy() *FlowCollectorEBPF { + if in == nil { + return nil + } + out := new(FlowCollectorEBPF) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorExporter) DeepCopyInto(out *FlowCollectorExporter) { + *out = *in + out.Kafka = in.Kafka + out.IPFIX = in.IPFIX +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorExporter. +func (in *FlowCollectorExporter) DeepCopy() *FlowCollectorExporter { + if in == nil { + return nil + } + out := new(FlowCollectorExporter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorFLP) DeepCopyInto(out *FlowCollectorFLP) { + *out = *in + in.Metrics.DeepCopyInto(&out.Metrics) + in.Resources.DeepCopyInto(&out.Resources) + if in.EnableKubeProbes != nil { + in, out := &in.EnableKubeProbes, &out.EnableKubeProbes + *out = new(bool) + **out = **in + } + if in.DropUnusedFields != nil { + in, out := &in.DropUnusedFields, &out.DropUnusedFields + *out = new(bool) + **out = **in + } + if in.KafkaConsumerReplicas != nil { + in, out := &in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas + *out = new(int32) + **out = **in + } + in.KafkaConsumerAutoscaler.DeepCopyInto(&out.KafkaConsumerAutoscaler) + if in.LogTypes != nil { + in, out := &in.LogTypes, &out.LogTypes + *out = new(string) + **out = **in + } + if in.ConversationHeartbeatInterval != nil { + in, out := &in.ConversationHeartbeatInterval, &out.ConversationHeartbeatInterval + *out = new(v1.Duration) + **out = **in + } + if in.ConversationEndTimeout != nil { + in, out := &in.ConversationEndTimeout, &out.ConversationEndTimeout + *out = new(v1.Duration) + **out = **in + } + if in.ConversationTerminatingTimeout != nil { + in, out := &in.ConversationTerminatingTimeout, &out.ConversationTerminatingTimeout + *out = new(v1.Duration) + **out = **in + } + in.Debug.DeepCopyInto(&out.Debug) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorFLP. +func (in *FlowCollectorFLP) DeepCopy() *FlowCollectorFLP { + if in == nil { + return nil + } + out := new(FlowCollectorFLP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowCollectorHPA) DeepCopyInto(out *FlowCollectorHPA) { + *out = *in + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]v2.MetricSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorHPA. +func (in *FlowCollectorHPA) DeepCopy() *FlowCollectorHPA { + if in == nil { + return nil + } + out := new(FlowCollectorHPA) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorIPFIX) DeepCopyInto(out *FlowCollectorIPFIX) { + *out = *in + out.ClusterNetworkOperator = in.ClusterNetworkOperator + out.OVNKubernetes = in.OVNKubernetes +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIX. +func (in *FlowCollectorIPFIX) DeepCopy() *FlowCollectorIPFIX { + if in == nil { + return nil + } + out := new(FlowCollectorIPFIX) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorIPFIXReceiver) DeepCopyInto(out *FlowCollectorIPFIXReceiver) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIXReceiver. +func (in *FlowCollectorIPFIXReceiver) DeepCopy() *FlowCollectorIPFIXReceiver { + if in == nil { + return nil + } + out := new(FlowCollectorIPFIXReceiver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorKafka) DeepCopyInto(out *FlowCollectorKafka) { + *out = *in + out.TLS = in.TLS +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorKafka. +func (in *FlowCollectorKafka) DeepCopy() *FlowCollectorKafka { + if in == nil { + return nil + } + out := new(FlowCollectorKafka) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorList) DeepCopyInto(out *FlowCollectorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowCollector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorList. +func (in *FlowCollectorList) DeepCopy() *FlowCollectorList { + if in == nil { + return nil + } + out := new(FlowCollectorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowCollectorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowCollectorLoki) DeepCopyInto(out *FlowCollectorLoki) { + *out = *in + in.Manual.DeepCopyInto(&out.Manual) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorLoki. +func (in *FlowCollectorLoki) DeepCopy() *FlowCollectorLoki { + if in == nil { + return nil + } + out := new(FlowCollectorLoki) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorSpec) DeepCopyInto(out *FlowCollectorSpec) { + *out = *in + in.Agent.DeepCopyInto(&out.Agent) + in.Processor.DeepCopyInto(&out.Processor) + in.Loki.DeepCopyInto(&out.Loki) + in.ConsolePlugin.DeepCopyInto(&out.ConsolePlugin) + out.Kafka = in.Kafka + if in.Exporters != nil { + in, out := &in.Exporters, &out.Exporters + *out = make([]*FlowCollectorExporter, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(FlowCollectorExporter) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorSpec. +func (in *FlowCollectorSpec) DeepCopy() *FlowCollectorSpec { + if in == nil { + return nil + } + out := new(FlowCollectorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowCollectorStatus) DeepCopyInto(out *FlowCollectorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorStatus. +func (in *FlowCollectorStatus) DeepCopy() *FlowCollectorStatus { + if in == nil { + return nil + } + out := new(FlowCollectorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LokiManualParams) DeepCopyInto(out *LokiManualParams) { + *out = *in + out.BatchWait = in.BatchWait + out.Timeout = in.Timeout + out.MinBackoff = in.MinBackoff + out.MaxBackoff = in.MaxBackoff + if in.MaxRetries != nil { + in, out := &in.MaxRetries, &out.MaxRetries + *out = new(int32) + **out = **in + } + if in.StaticLabels != nil { + in, out := &in.StaticLabels, &out.StaticLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.TLS = in.TLS + out.StatusTLS = in.StatusTLS +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiManualParams. +func (in *LokiManualParams) DeepCopy() *LokiManualParams { + if in == nil { + return nil + } + out := new(LokiManualParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsServerConfig) DeepCopyInto(out *MetricsServerConfig) { + *out = *in + in.TLS.DeepCopyInto(&out.TLS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsServerConfig. 
+func (in *MetricsServerConfig) DeepCopy() *MetricsServerConfig { + if in == nil { + return nil + } + out := new(MetricsServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig. +func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuickFilter) DeepCopyInto(out *QuickFilter) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickFilter. +func (in *QuickFilter) DeepCopy() *QuickFilter { + if in == nil { + return nil + } + out := new(QuickFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerTLS) DeepCopyInto(out *ServerTLS) { + *out = *in + if in.Provided != nil { + in, out := &in.Provided, &out.Provided + *out = new(CertificateReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerTLS. +func (in *ServerTLS) DeepCopy() *ServerTLS { + if in == nil { + return nil + } + out := new(ServerTLS) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index e16ba46fb..450d02f36 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -4539,6 +4539,2357 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.agent.type + name: Agent + type: string + - jsonPath: .spec.agent.ebpf.sampling + name: Sampling (EBPF) + type: string + - jsonPath: .spec.deploymentModel + name: Deployment Model + type: string + - jsonPath: .status.conditions[*].reason + name: Status + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: FlowCollector is the schema for the network flows collection + API, which pilots and configures the underlying deployments. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'FlowCollectorSpec defines the desired state of FlowCollector. +

*: the mention of "unsupported", or "deprecated" + for a feature throughout this document means that this feature is not + officially supported by Red Hat. It may have been, for instance, contributed + by the community and accepted without a formal agreement for maintenance. + The product maintainers may provide some support for these features + as a best effort only.' + properties: + agent: + default: + type: EBPF + description: agent for flows extraction. + properties: + ebpf: + description: ebpf describes the settings related to the eBPF-based + flow reporter when the "agent.type" property is set to "EBPF". + properties: + cacheActiveTimeout: + default: 5s + description: cacheActiveTimeout is the max period during which + the reporter will aggregate flows before sending. Increasing + `cacheMaxFlows` and `cacheActiveTimeout` can decrease the + network traffic overhead and the CPU load, however you can + expect higher memory consumption and an increased latency + in the flow collection. + pattern: ^\d+(ns|ms|s|m)?$ + type: string + cacheMaxFlows: + default: 100000 + description: cacheMaxFlows is the max number of flows in an + aggregate; when reached, the reporter sends the flows. Increasing + `cacheMaxFlows` and `cacheActiveTimeout` can decrease the + network traffic overhead and the CPU load, however you can + expect higher memory consumption and an increased latency + in the flow collection. + format: int32 + minimum: 1 + type: integer + debug: + description: Debug allows setting some aspects of the internal + configuration of the eBPF agent. This section is aimed exclusively + for debugging and fine-grained performance optimizations + (for example GOGC, GOMAXPROCS env vars). Users setting its + values do it at their own risk. + properties: + env: + additionalProperties: + type: string + description: env allows passing custom environment variables + to the NetObserv Agent. Useful for passing some very + concrete performance-tuning options (such as GOGC, GOMAXPROCS) + that shouldn't be publicly exposed as part of the FlowCollector + descriptor, as they are only useful in edge debug and + support scenarios. + type: object + type: object + excludeInterfaces: + default: + - lo + description: excludeInterfaces contains the interface names + that will be excluded from flow tracing. If an entry is + enclosed by slashes (such as `/br-/`), it will match as + regular expression, otherwise it will be matched as a case-sensitive + string. + items: + type: string + type: array + imagePullPolicy: + default: IfNotPresent + description: imagePullPolicy is the Kubernetes pull policy + for the image defined above + enum: + - IfNotPresent + - Always + - Never + type: string + interfaces: + description: interfaces contains the interface names from + where flows will be collected. If empty, the agent will + fetch all the interfaces in the system, excepting the ones + listed in ExcludeInterfaces. If an entry is enclosed by + slashes (such as `/br-/`), it will match as regular expression, + otherwise it will be matched as a case-sensitive string. + items: + type: string + type: array + kafkaBatchSize: + default: 10485760 + description: 'kafkaBatchSize limits the maximum size of a + request in bytes before being sent to a partition. Ignored + when not using Kafka. Default: 10MB.' 
+ type: integer + logLevel: + default: info + description: logLevel defines the log level for the NetObserv + eBPF Agent + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + privileged: + description: 'privileged mode for the eBPF Agent container. + In general this setting can be ignored or set to false: + in that case, the operator will set granular capabilities + (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, + to enable its correct operation. If for some reason these + capabilities cannot be set (for example old kernel version + not knowing CAP_BPF) then you can turn on this mode for + more global privileges.' + type: boolean + resources: + default: + limits: + memory: 800Mi + requests: + cpu: 100m + memory: 50Mi + description: 'resources are the compute resources required + by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + sampling: + default: 50 + description: sampling rate of the flow reporter. 100 means + one flow on 100 is sent. 0 or 1 means all flows are sampled. + format: int32 + minimum: 0 + type: integer + type: object + ipfix: + description: ipfix - deprecated (*) - describes the settings + related to the IPFIX-based flow reporter when the "agent.type" + property is set to "IPFIX". + properties: + cacheActiveTimeout: + default: 20s + description: cacheActiveTimeout is the max period during which + the reporter will aggregate flows before sending + pattern: ^\d+(ns|ms|s|m)?$ + type: string + cacheMaxFlows: + default: 400 + description: cacheMaxFlows is the max number of flows in an + aggregate; when reached, the reporter sends the flows + format: int32 + minimum: 0 + type: integer + clusterNetworkOperator: + description: clusterNetworkOperator defines the settings related + to the OpenShift Cluster Network Operator, when available. + properties: + namespace: + default: openshift-network-operator + description: namespace where the config map is going + to be deployed. + type: string + type: object + forceSampleAll: + default: false + description: forceSampleAll allows disabling sampling in the + IPFIX-based flow reporter. It is not recommended to sample + all the traffic with IPFIX, as it might generate cluster + instability. If you REALLY want to do that, set this flag + to true. Use at your own risk. When it is set to true, the + value of "sampling" is ignored. 
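+                        # Hedged sketch (comments only): using the deprecated IPFIX
+                        # reporter with the lowest sampling the schema permits, rather
+                        # than forceSampleAll, which the description above discourages:
+                        #   agent:
+                        #     type: IPFIX
+                        #     ipfix:
+                        #       sampling: 2           # minimum allowed value
+                        #       forceSampleAll: false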
+ type: boolean + ovnKubernetes: + description: ovnKubernetes defines the settings of the OVN-Kubernetes + CNI, when available. This configuration is used when using + OVN's IPFIX exports, without OpenShift. When using OpenShift, + refer to the `clusterNetworkOperator` property instead. + properties: + containerName: + default: ovnkube-node + description: containerName defines the name of the container + to configure for IPFIX. + type: string + daemonSetName: + default: ovnkube-node + description: daemonSetName defines the name of the DaemonSet + controlling the OVN-Kubernetes pods. + type: string + namespace: + default: ovn-kubernetes + description: namespace where OVN-Kubernetes pods are deployed. + type: string + type: object + sampling: + default: 400 + description: sampling is the sampling rate on the reporter. + 100 means one flow on 100 is sent. To ensure cluster stability, + it is not possible to set a value below 2. If you really + want to sample every packet, which might impact the cluster + stability, refer to "forceSampleAll". Alternatively, you + can use the eBPF Agent instead of IPFIX. + format: int32 + minimum: 2 + type: integer + type: object + type: + default: EBPF + description: type selects the flows tracing agent. Possible values + are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" - + deprecated (*) - to use the legacy IPFIX collector. "EBPF" + is recommended in most cases as it offers better performances + and should work regardless of the CNI installed on the cluster. + "IPFIX" works with OVN-Kubernetes CNI (other CNIs could work + if they support exporting IPFIX, but they would require manual + configuration). + enum: + - EBPF + - IPFIX + type: string + required: + - type + type: object + consolePlugin: + description: consolePlugin defines the settings related to the OpenShift + Console plugin, when available. + properties: + autoscaler: + description: autoscaler spec of a horizontal pod autoscaler to + set up for the plugin Deployment. + properties: + maxReplicas: + default: 3 + description: maxReplicas is the upper limit for the number + of pods that can be set by the autoscaler; cannot be smaller + than MinReplicas. + format: int32 + type: integer + metrics: + description: metrics used by the pod autoscaler + items: + description: MetricSpec specifies how to scale based on + a single metric (only `type` and one other matching field + should be set at once). + properties: + containerResource: + description: containerResource refers to a resource + metric (such as those specified in requests and limits) + known to Kubernetes describing a single container + in each pod of the current scale target (e.g. CPU + or memory). Such metrics are built in to Kubernetes, + and have special scaling options on top of those available + to normal per-pod metrics using the "pods" source. + This is an alpha feature and can be enabled by the + HPAContainerMetrics feature flag. + properties: + container: + description: container is the name of the container + in the pods of the scaling target + type: string + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. 
Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - container + - name + - target + type: object + external: + description: external refers to a global metric that + is not associated with any Kubernetes object. It allows + autoscaling based on information coming from components + running outside of cluster (for example length of + queue in cloud messaging service, or QPS from loadbalancer + running outside of cluster). + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. 
Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + object: + description: object refers to a metric describing a + single kubernetes object (for example, hits-per-second + on an Ingress object). + properties: + describedObject: + description: describedObject specifies the descriptions + of a object,such as kind,name apiVersion + properties: + apiVersion: + description: API version of the referent + type: string + kind: + description: 'Kind of the referent; More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + type: string + name: + description: 'Name of the referent; More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + required: + - kind + - name + type: object + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. 
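+                                  # Stepping back from the vendored HPA schema: a
+                                  # plausible, illustrative autoscaler block for the
+                                  # console plugin, scaling on average CPU. The 70%
+                                  # target is a hypothetical choice, not a default:
+                                  #   consolePlugin:
+                                  #     autoscaler:
+                                  #       status: ENABLED
+                                  #       minReplicas: 1
+                                  #       maxReplicas: 3
+                                  #       metrics:
+                                  #       - type: Resource
+                                  #         resource:
+                                  #           name: cpu
+                                  #           target:
+                                  #             type: Utilization
+                                  #             averageUtilization: 70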
+ type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - describedObject + - metric + - target + type: object + pods: + description: pods refers to a metric describing each + pod in the current scale target (for example, transactions-processed-per-second). The + values will be averaged together before being compared + to the target value. + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + resource: + description: resource refers to a resource metric (such + as those specified in requests and limits) known to + Kubernetes describing each pod in the current scale + target (e.g. CPU or memory). Such metrics are built + in to Kubernetes, and have special scaling options + on top of those available to normal per-pod metrics + using the "pods" source. + properties: + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: + description: 'type is the type of metric source. It + should be one of "ContainerResource", "External", + "Object", "Pods" or "Resource", each mapping to a + matching field in the object. Note: "ContainerResource" + type is available on when the feature-gate HPAContainerMetrics + is enabled' + type: string + required: + - type + type: object + type: array + minReplicas: + description: minReplicas is the lower limit for the number + of replicas to which the autoscaler can scale down. It + defaults to 1 pod. 
minReplicas is allowed to be 0 if the
+                        alpha feature gate HPAScaleToZero is enabled and at least
+                        one Object or External metric is configured. Scaling is
+                        active as long as at least one metric value is available.
+                      format: int32
+                      type: integer
+                    status:
+                      default: DISABLED
+                      description: Status describes the desired status regarding
+                        deploying a horizontal pod autoscaler. DISABLED will not
+                        deploy a horizontal pod autoscaler. ENABLED will deploy
+                        a horizontal pod autoscaler.
+                      enum:
+                      - DISABLED
+                      - ENABLED
+                      type: string
+                  type: object
+                imagePullPolicy:
+                  default: IfNotPresent
+                  description: imagePullPolicy is the Kubernetes pull policy for
+                    the image defined above
+                  enum:
+                  - IfNotPresent
+                  - Always
+                  - Never
+                  type: string
+                logLevel:
+                  default: info
+                  description: logLevel for the console plugin backend
+                  enum:
+                  - trace
+                  - debug
+                  - info
+                  - warn
+                  - error
+                  - fatal
+                  - panic
+                  type: string
+                port:
+                  default: 9001
+                  description: port is the plugin service port. Do not use 9002,
+                    which is reserved for metrics.
+                  format: int32
+                  maximum: 65535
+                  minimum: 1
+                  type: integer
+                portNaming:
+                  default:
+                    enable: true
+                  description: portNaming defines the configuration of the port-to-service
+                    name translation
+                  properties:
+                    enable:
+                      default: true
+                      description: enable the console plugin port-to-service name
+                        translation
+                      type: boolean
+                    portNames:
+                      additionalProperties:
+                        type: string
+                      description: 'portNames defines additional port names to use
+                        in the console. Example: portNames: {"3100": "loki"}'
+                      type: object
+                  type: object
+                quickFilters:
+                  default:
+                  - default: true
+                    filter:
+                      dst_namespace!: openshift-,netobserv
+                      src_namespace!: openshift-,netobserv
+                    name: Applications
+                  - filter:
+                      dst_namespace: openshift-,netobserv
+                      src_namespace: openshift-,netobserv
+                    name: Infrastructure
+                  - default: true
+                    filter:
+                      dst_kind: Pod
+                      src_kind: Pod
+                    name: Pods network
+                  - filter:
+                      dst_kind: Service
+                    name: Services network
+                  description: quickFilters configures quick filter presets for
+                    the Console plugin
+                  items:
+                    description: QuickFilter defines preset configuration for Console's
+                      quick filters
+                    properties:
+                      default:
+                        description: default defines whether this filter should
+                          be active by default or not
+                        type: boolean
+                      filter:
+                        additionalProperties:
+                          type: string
+                        description: 'filter is a set of keys and values to be set
+                          when this filter is selected. Each key can relate to a
+                          list of values using a comma-separated string. Example:
+                          filter: {"src_namespace": "namespace1,namespace2"}'
+                        type: object
+                      name:
+                        description: name of the filter that will be displayed
+                          in the Console
+                        type: string
+                    required:
+                    - filter
+                    - name
+                    type: object
+                  type: array
+                register:
+                  default: true
+                  description: 'register allows, when set to true, to automatically
+                    register the provided console plugin with the OpenShift Console
+                    operator. When set to false, you can still register it manually
+                    by editing console.operator.openshift.io/cluster. E.g.: oc patch
+                    console.operator.openshift.io cluster --type=''json'' -p ''[{"op":
+                    "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]'''
+                  type: boolean
+                replicas:
+                  default: 1
+                  description: replicas defines the number of replicas (pods) to
+                    start.
+                  format: int32
+                  minimum: 0
+                  type: integer
+                resources:
+                  default:
+                    limits:
+                      memory: 100Mi
+                    requests:
+                      cpu: 100m
+                      memory: 50Mi
+                  description: 'resources, in terms of compute resources, required
+                    by this container.
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + deploymentModel: + default: DIRECT + description: deploymentModel defines the desired type of deployment + for flow processing. Possible values are "DIRECT" (default) to make + the flow processor listening directly from the agents, or "KAFKA" + to make flows sent to a Kafka pipeline before consumption by the + processor. Kafka can provide better scalability, resiliency and + high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). + enum: + - DIRECT + - KAFKA + type: string + exporters: + description: exporters define additional optional exporters for custom + consumption or storage. + items: + description: FlowCollectorExporter defines an additional exporter + to send enriched flows to. + properties: + ipfix: + description: IPFIX configuration, such as the IP address and + port to send enriched IPFIX flows to. Unsupported (*). + properties: + targetHost: + default: "" + description: address of the ipfix external receiver + type: string + targetPort: + description: port for the ipfix external receiver + type: integer + transport: + description: Transport protocol (tcp/udp) to be used for + the IPFIX connection, defaults to tcp + enum: + - TCP + - UDP + type: string + required: + - targetHost + - targetPort + type: object + kafka: + description: kafka configuration, such as the address and topic, + to send enriched flows to. + properties: + address: + default: "" + description: address of the Kafka server + type: string + tls: + description: tls client configuration. When using TLS, verify + that the address matches the Kafka port used for TLS, + generally 9093. Note that, when eBPF agents are used, + Kafka certificate needs to be copied in the agent namespace + (by default it's netobserv-privileged). + properties: + caCert: + description: caCert defines the reference of the certificate + for the Certificate Authority + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or + secret. Omit when the key is not necessary. 
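+                      # Illustrative exporter entry (comments only; the address,
+                      # topic and secret names are placeholders): sending enriched
+                      # flows to an external Kafka with a CA cert from a config map,
+                      # using the fields described in this section:
+                      #   exporters:
+                      #   - type: KAFKA
+                      #     kafka:
+                      #       address: "my-kafka-bootstrap.example:9093"
+                      #       topic: network-flows-export
+                      #       tls:
+                      #         enable: true
+                      #         caCert:
+                      #           type: configmap
+                      #           name: kafka-ca
+                      #           certFile: ca.crt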
+ type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: enable TLS + type: boolean + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the server certificate If set to true, + CACert field will be ignored + type: boolean + userCert: + description: userCert defines the user certificate reference, + used for mTLS (you can ignore it when using regular, + one-way TLS) + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or + secret. Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the + secret will be copied so that it can be mounted + as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: object + topic: + default: "" + description: kafka topic to use. It must exist, NetObserv + will not create it. + type: string + required: + - address + - topic + type: object + type: + description: type selects the type of exporters. The available + options are "KAFKA" and "IPFIX". "IPFIX" is unsupported + (*). + enum: + - KAFKA + - IPFIX + type: string + required: + - type + type: object + type: array + kafka: + description: kafka configuration, allowing to use Kafka as a broker + as part of the flow collection pipeline. Available when the "spec.deploymentModel" + is "KAFKA". + properties: + address: + default: "" + description: address of the Kafka server + type: string + tls: + description: tls client configuration. When using TLS, verify + that the address matches the Kafka port used for TLS, generally + 9093. Note that, when eBPF agents are used, Kafka certificate + needs to be copied in the agent namespace (by default it's netobserv-privileged). + properties: + caCert: + description: caCert defines the reference of the certificate + for the Certificate Authority + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret containing + certificates. 
If omitted, assumes same namespace as
+                          where NetObserv is deployed. If the namespace is different,
+                          the config map or the secret will be copied so that
+                          it can be mounted as required.
+                        type: string
+                      type:
+                        description: 'type for the certificate reference: "configmap"
+                          or "secret"'
+                        enum:
+                        - configmap
+                        - secret
+                        type: string
+                    type: object
+                  enable:
+                    default: false
+                    description: enable TLS
+                    type: boolean
+                  insecureSkipVerify:
+                    default: false
+                    description: insecureSkipVerify allows skipping client-side
+                      verification of the server certificate. If set to true, the
+                      CACert field is ignored.
+                    type: boolean
+                  userCert:
+                    description: userCert defines the user certificate reference,
+                      used for mTLS (you can ignore it when using regular, one-way
+                      TLS)
+                    properties:
+                      certFile:
+                        description: certFile defines the path to the certificate
+                          file name within the config map or secret
+                        type: string
+                      certKey:
+                        description: certKey defines the path to the certificate
+                          private key file name within the config map or secret.
+                          Omit when the key is not necessary.
+                        type: string
+                      name:
+                        description: name of the config map or secret containing
+                          certificates
+                        type: string
+                      namespace:
+                        default: ""
+                        description: namespace of the config map or secret containing
+                          certificates. If omitted, assumes same namespace as
+                          where NetObserv is deployed. If the namespace is different,
+                          the config map or the secret will be copied so that
+                          it can be mounted as required.
+                        type: string
+                      type:
+                        description: 'type for the certificate reference: "configmap"
+                          or "secret"'
+                        enum:
+                        - configmap
+                        - secret
+                        type: string
+                    type: object
+                type: object
+              topic:
+                default: ""
+                description: kafka topic to use. It must exist; NetObserv will
+                  not create it.
+                type: string
+            required:
+            - address
+            - topic
+            type: object
+          loki:
+            description: loki, the flow store, client settings.
+            properties:
+              manual:
+                properties:
+                  authToken:
+                    default: DISABLED
+                    description: AuthToken describes the way to get a token to
+                      authenticate to Loki. DISABLED will not send any token with
+                      the request. HOST - deprecated (*) - will use the
+                      local pod service account to authenticate to Loki. FORWARD
+                      will forward the user token for authorization. When using
+                      the Loki Operator, this should be set to `FORWARD`.
+                    enum:
+                    - DISABLED
+                    - HOST
+                    - FORWARD
+                    type: string
+                  batchSize:
+                    default: 102400
+                    description: batchSize is the maximum batch size (in bytes)
+                      of logs to accumulate before sending.
+                    format: int64
+                    minimum: 1
+                    type: integer
+                  batchWait:
+                    default: 1s
+                    description: batchWait is the maximum time to wait before sending
+                      a batch.
+                    type: string
+                  maxBackoff:
+                    default: 5s
+                    description: maxBackoff is the maximum backoff time for client
+                      connection between retries.
+                    type: string
+                  maxRetries:
+                    default: 2
+                    description: maxRetries is the maximum number of retries for
+                      client connections.
+                    format: int32
+                    minimum: 0
+                    type: integer
+                  minBackoff:
+                    default: 1s
+                    description: minBackoff is the initial backoff time for client
+                      connection between retries.
+                    type: string
+                  querierUrl:
+                    description: querierURL specifies the address of the Loki
+                      querier service, in case it is different from the Loki ingester
+                      URL. If empty, the URL value will be used (assuming that
+                      the Loki ingester and querier are on the same server). When
+                      using the Loki Operator, do not set it, since ingestion
+                      and queries use the Loki gateway.
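+                    # Hedged sketch (comments only) of the Loki Operator setup that
+                    # the descriptions in this section point to; the host names are
+                    # the examples given in those descriptions, assuming a
+                    # "netobserv" namespace:
+                    #   loki:
+                    #     mode: manual
+                    #     manual:
+                    #       authToken: FORWARD
+                    #       tenantID: network
+                    #       url: https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network
+                    #       statusUrl: https://loki-query-frontend-http.netobserv.svc:3100/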
+ type: string + staticLabels: + additionalProperties: + type: string + default: + app: netobserv-flowcollector + description: staticLabels is a map of common labels to set + on each flow. + type: object + statusTls: + description: tls client configuration for loki status URL. + properties: + caCert: + description: caCert defines the reference of the certificate + for the Certificate Authority + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + will be copied so that it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: enable TLS + type: boolean + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the server certificate If set to true, + CACert field will be ignored + type: boolean + userCert: + description: userCert defines the user certificate reference, + used for mTLS (you can ignore it when using regular, + one-way TLS) + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + will be copied so that it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: object + statusUrl: + description: statusURL specifies the address of the Loki /ready + /metrics /config endpoints, in case it is different from + the Loki querier URL. If empty, the QuerierURL value will + be used. This is useful to show error messages and some + context in the frontend. When using the Loki Operator, set + it to the Loki HTTP query frontend service, for example + https://loki-query-frontend-http.netobserv.svc:3100/. statusTLS + configuration will be used when statusUrl is set. + type: string + tenantID: + default: netobserv + description: tenantID is the Loki X-Scope-OrgID that identifies + the tenant for each request. When using the Loki Operator, + set it to `network`, which corresponds to a special tenant + mode. + type: string + timeout: + default: 10s + description: timeout is the maximum time connection / request + limit. A Timeout of zero means no timeout. 
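+                    # Illustrative client-tuning values (comments only; hypothetical
+                    # choices, not defaults) combining the batching and retry knobs
+                    # described above:
+                    #   loki:
+                    #     manual:
+                    #       batchSize: 204800   # bytes
+                    #       batchWait: 2s
+                    #       minBackoff: 1s
+                    #       maxBackoff: 10s
+                    #       maxRetries: 3
+                    #       timeout: 30s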
+ type: string + tls: + description: tls client configuration for loki URL. + properties: + caCert: + description: caCert defines the reference of the certificate + for the Certificate Authority + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + will be copied so that it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + enable: + default: false + description: enable TLS + type: boolean + insecureSkipVerify: + default: false + description: insecureSkipVerify allows skipping client-side + verification of the server certificate If set to true, + CACert field will be ignored + type: boolean + userCert: + description: userCert defines the user certificate reference, + used for mTLS (you can ignore it when using regular, + one-way TLS) + properties: + certFile: + description: certFile defines the path to the certificate + file name within the config map or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map or secret. + Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret containing + certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes same + namespace as where NetObserv is deployed. If the + namespace is different, the config map or the secret + will be copied so that it can be mounted as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: object + url: + default: http://loki:3100/ + description: url is the address of an existing Loki service + to push the flows to. When using the Loki Operator, set + it to the Loki gateway service with the `network` tenant + set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. + type: string + type: object + mode: + enum: + - manual + type: string + type: object + namespace: + description: namespace where NetObserv pods are deployed. If empty, + the namespace of the operator is going to be used. + type: string + processor: + description: processor defines the settings of the component that + receives the flows from the agent, enriches them, and forwards them + to the Loki persistence layer. 
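+            # Sketch (comments only, illustrative): enabling conversation tracking
+            # with the timeouts defined in the properties below; the values shown
+            # are the schema defaults:
+            #   processor:
+            #     logTypes: CONVERSATIONS
+            #     conversationHeartbeatInterval: 30s
+            #     conversationEndTimeout: 10s
+            #     conversationTerminatingTimeout: 5s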
+ properties: + conversationEndTimeout: + default: 10s + description: conversation end timeout is the duration of time + to wait from the last flow log to end a conversation + type: string + conversationHeartbeatInterval: + default: 30s + description: conversation heartbeat interval is the duration of + time to wait between heartbeat reports of a conversation + type: string + conversationTerminatingTimeout: + default: 5s + description: conversation terminating timeout is the duration + of time to wait from detected FIN flag to end a connection + type: string + debug: + description: Debug allows setting some aspects of the internal + configuration of the flow processor. This section is aimed exclusively + for debugging and fine-grained performance optimizations (for + example GOGC, GOMAXPROCS env vars). Users setting its values + do it at their own risk. + properties: + env: + additionalProperties: + type: string + description: env allows passing custom environment variables + to the NetObserv Agent. Useful for passing some very concrete + performance-tuning options (such as GOGC, GOMAXPROCS) that + shouldn't be publicly exposed as part of the FlowCollector + descriptor, as they are only useful in edge debug and support + scenarios. + type: object + type: object + dropUnusedFields: + default: true + description: dropUnusedFields allows, when set to true, to drop + fields that are known to be unused by OVS, in order to save + storage space. + type: boolean + enableKubeProbes: + default: true + description: enableKubeProbes is a flag to enable or disable Kubernetes + liveness and readiness probes + type: boolean + healthPort: + default: 8080 + description: healthPort is a collector HTTP port in the Pod that + exposes the health check API + format: int32 + maximum: 65535 + minimum: 1 + type: integer + imagePullPolicy: + default: IfNotPresent + description: imagePullPolicy is the Kubernetes pull policy for + the image defined above + enum: + - IfNotPresent + - Always + - Never + type: string + kafkaConsumerAutoscaler: + description: kafkaConsumerAutoscaler spec of a horizontal pod + autoscaler to set up for flowlogs-pipeline-transformer, which + consumes Kafka messages. This setting is ignored when Kafka + is disabled. + properties: + maxReplicas: + default: 3 + description: maxReplicas is the upper limit for the number + of pods that can be set by the autoscaler; cannot be smaller + than MinReplicas. + format: int32 + type: integer + metrics: + description: metrics used by the pod autoscaler + items: + description: MetricSpec specifies how to scale based on + a single metric (only `type` and one other matching field + should be set at once). + properties: + containerResource: + description: containerResource refers to a resource + metric (such as those specified in requests and limits) + known to Kubernetes describing a single container + in each pod of the current scale target (e.g. CPU + or memory). Such metrics are built in to Kubernetes, + and have special scaling options on top of those available + to normal per-pod metrics using the "pods" source. + This is an alpha feature and can be enabled by the + HPAContainerMetrics feature flag. + properties: + container: + description: container is the name of the container + in the pods of the scaling target + type: string + name: + description: name is the name of the resource in + question. 
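+            # Sketch (comments only): the consumer settings in this block only
+            # apply when spec.deploymentModel is "KAFKA". The broker address and
+            # topic are placeholders, not defaults:
+            #   deploymentModel: KAFKA
+            #   kafka:
+            #     address: "my-kafka-bootstrap.netobserv:9092"
+            #     topic: network-flows
+            #   processor:
+            #     kafkaConsumerReplicas: 3
+            #     kafkaConsumerAutoscaler:
+            #       status: ENABLED
+            #       maxReplicas: 3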
+ type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - container + - name + - target + type: object + external: + description: external refers to a global metric that + is not associated with any Kubernetes object. It allows + autoscaling based on information coming from components + running outside of cluster (for example length of + queue in cloud messaging service, or QPS from loadbalancer + running outside of cluster). + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + object: + description: object refers to a metric describing a + single kubernetes object (for example, hits-per-second + on an Ingress object). + properties: + describedObject: + description: describedObject specifies the descriptions + of a object,such as kind,name apiVersion + properties: + apiVersion: + description: API version of the referent + type: string + kind: + description: 'Kind of the referent; More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + type: string + name: + description: 'Name of the referent; More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + required: + - kind + - name + type: object + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - describedObject + - metric + - target + type: object + pods: + description: pods refers to a metric describing each + pod in the current scale target (for example, transactions-processed-per-second). The + values will be averaged together before being compared + to the target value. + properties: + metric: + description: metric identifies the target metric + by name and selector + properties: + name: + description: name is the name of the given metric + type: string + selector: + description: selector is the string-encoded + form of a standard kubernetes label selector + for the given metric When set, it is passed + as an additional parameter to the metrics + server for more specific metrics scoping. + When unset, just the metricName will be used + to gather metrics. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + required: + - name + type: object + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + resource: + description: resource refers to a resource metric (such + as those specified in requests and limits) known to + Kubernetes describing each pod in the current scale + target (e.g. CPU or memory). Such metrics are built + in to Kubernetes, and have special scaling options + on top of those available to normal per-pod metrics + using the "pods" source. + properties: + name: + description: name is the name of the resource in + question. + type: string + target: + description: target specifies the target value for + the given metric + properties: + averageUtilization: + description: averageUtilization is the target + value of the average of the resource metric + across all relevant pods, represented as a + percentage of the requested value of the resource + for the pods. Currently only valid for Resource + metric source type + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + description: averageValue is the target value + of the average of the metric across all relevant + pods (as a quantity) + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + description: type represents whether the metric + type is Utilization, Value, or AverageValue + type: string + value: + anyOf: + - type: integer + - type: string + description: value is the target value of the + metric (as a quantity). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - name + - target + type: object + type: + description: 'type is the type of metric source. It + should be one of "ContainerResource", "External", + "Object", "Pods" or "Resource", each mapping to a + matching field in the object. Note: "ContainerResource" + type is available on when the feature-gate HPAContainerMetrics + is enabled' + type: string + required: + - type + type: object + type: array + minReplicas: + description: minReplicas is the lower limit for the number + of replicas to which the autoscaler can scale down. It + defaults to 1 pod. 
minReplicas is allowed to be 0 if the + alpha feature gate HPAScaleToZero is enabled and at least + one Object or External metric is configured. Scaling is + active as long as at least one metric value is available. + format: int32 + type: integer + status: + default: DISABLED + description: Status describes the desired status regarding + deploying a horizontal pod autoscaler. DISABLED will not + deploy a horizontal pod autoscaler. ENABLED will deploy + a horizontal pod autoscaler. + enum: + - DISABLED + - ENABLED + type: string + type: object + kafkaConsumerBatchSize: + default: 10485760 + description: 'kafkaConsumerBatchSize indicates to the broker the + maximum batch size, in bytes, that the consumer will accept. + Ignored when not using Kafka. Default: 10MB.' + type: integer + kafkaConsumerQueueCapacity: + default: 1000 + description: kafkaConsumerQueueCapacity defines the capacity of + the internal message queue used in the Kafka consumer client. + Ignored when not using Kafka. + type: integer + kafkaConsumerReplicas: + default: 3 + description: kafkaConsumerReplicas defines the number of replicas + (pods) to start for flowlogs-pipeline-transformer, which consumes + Kafka messages. This setting is ignored when Kafka is disabled. + format: int32 + minimum: 0 + type: integer + logLevel: + default: info + description: logLevel of the collector runtime + enum: + - trace + - debug + - info + - warn + - error + - fatal + - panic + type: string + logTypes: + default: FLOWS + description: logTypes defines the desired record types to generate. + Possible values are "FLOWS" (default) to export flowLogs, "CONVERSATIONS" + to generate newConnection, heartbeat, endConnection events, + "ENDED_CONVERSATIONS" to generate only endConnection events + or "ALL" to generate both flow logs and conversation events + enum: + - FLOWS + - CONVERSATIONS + - ENDED_CONVERSATIONS + - ALL + type: string + metrics: + description: Metrics define the processor configuration regarding + metrics + properties: + disableAlerts: + description: 'disableAlerts is a list of alerts that should + be disabled. Possible values are: `NetObservNoFlows`, which + is triggered when no flows are being observed for a certain + period. `NetObservLokiError`, which is triggered when flows + are being dropped due to Loki errors.' + items: + description: 'Name of a processor alert. Possible values + are: `NetObservNoFlows`, which is triggered when no flows + are being observed for a certain period. `NetObservLokiError`, + which is triggered when flows are being dropped due to + Loki errors.' + enum: + - NetObservNoFlows + - NetObservLokiError + type: string + type: array + ignoreTags: + default: + - egress + - packets + description: 'ignoreTags is a list of tags to specify which + metrics to ignore. Each metric is associated with a list + of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions + . Available tags are: egress, ingress, flows, bytes, packets, + namespaces, nodes, workloads' + items: + type: string + type: array + server: + description: metricsServer endpoint configuration for Prometheus + scraper + properties: + port: + default: 9102 + description: the Prometheus HTTP port + format: int32 + maximum: 65535 + minimum: 1 + type: integer + tls: + description: TLS configuration. + properties: + provided: + description: TLS configuration.
+ properties: + certFile: + description: certFile defines the path to the + certificate file name within the config map + or secret + type: string + certKey: + description: certKey defines the path to the certificate + private key file name within the config map + or secret. Omit when the key is not necessary. + type: string + name: + description: name of the config map or secret + containing certificates + type: string + namespace: + default: "" + description: namespace of the config map or secret + containing certificates. If omitted, assumes + same namespace as where NetObserv is deployed. + If the namespace is different, the config map + or the secret will be copied so that it can + be mounted as required. + type: string + type: + description: 'type for the certificate reference: + "configmap" or "secret"' + enum: + - configmap + - secret + type: string + type: object + type: + default: DISABLED + description: Select the type of TLS configuration + "DISABLED" (default) to not configure TLS for the + endpoint, "PROVIDED" to manually provide a cert + file and a key file, and "AUTO" to use an OpenShift + auto-generated certificate using annotations + enum: + - DISABLED + - PROVIDED + - AUTO + type: string + type: object + type: object + type: object + port: + default: 2055 + description: 'port of the flow collector (host port). By convention, + some values are not authorized. The port must not be below 1024 + and must not equal these values: 4789, 6081, 500, and 4500' + format: int32 + maximum: 65535 + minimum: 1025 + type: integer + profilePort: + description: profilePort allows setting up a Go pprof profiler + listening to this port + format: int32 + maximum: 65535 + minimum: 0 + type: integer + resources: + default: + limits: + memory: 800Mi + requests: + cpu: 100m + memory: 100Mi + description: 'resources are the compute resources required by + this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + required: + - agent + - deploymentModel + type: object + status: + description: FlowCollectorStatus defines the observed state of FlowCollector + properties: + conditions: + description: conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions.
For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: namespace where console plugin and flowlogs-pipeline + have been deployed. 
+ type: string + required: + - conditions + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/samples/flows_v1beta2_flowcollector.yaml b/config/samples/flows_v1beta2_flowcollector.yaml new file mode 100644 index 000000000..5ab6cccf4 --- /dev/null +++ b/config/samples/flows_v1beta2_flowcollector.yaml @@ -0,0 +1,147 @@ +apiVersion: flows.netobserv.io/v1beta2 +kind: FlowCollector +metadata: + name: cluster +spec: + namespace: netobserv + deploymentModel: DIRECT + agent: + type: EBPF + ebpf: + imagePullPolicy: IfNotPresent + sampling: 50 + cacheActiveTimeout: 5s + cacheMaxFlows: 100000 + interfaces: [ ] + excludeInterfaces: [ "lo" ] + logLevel: info + resources: + requests: + memory: 50Mi + cpu: 100m + limits: + memory: 800Mi + kafkaBatchSize: 10485760 + processor: + port: 2055 + imagePullPolicy: IfNotPresent + logLevel: info + profilePort: 6060 + metrics: + server: + port: 9102 + ignoreTags: + - egress + - packets + disableAlerts: [] + dropUnusedFields: true + resources: + requests: + memory: 100Mi + cpu: 100m + limits: + memory: 800Mi + kafkaConsumerReplicas: 3 + kafkaConsumerAutoscaler: null + kafkaConsumerQueueCapacity: 1000 + kafkaConsumerBatchSize: 10485760 + logTypes: FLOWS + conversationTerminatingTimeout: 5s + conversationHeartbeatInterval: 30s + conversationEndTimeout: 10s + kafka: + address: "kafka-cluster-kafka-bootstrap.netobserv" + topic: network-flows + tls: + enable: false + caCert: + type: secret + name: kafka-cluster-cluster-ca-cert + certFile: ca.crt + userCert: + type: secret + name: flp-kafka + certFile: user.crt + certKey: user.key + loki: + mode: manual + manual: + url: 'http://loki.netobserv.svc:3100/' + # Uncomment lines below for typical installation with loki-operator (5.6+ needed) + # and ensure tls and statusTls are enabled + # url: 'https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network/' + # statusUrl: 'https://loki-query-frontend-http.netobserv.svc:3100/' + # authToken: FORWARD + tls: + enable: false + caCert: + type: configmap + name: loki-gateway-ca-bundle + certFile: service-ca.crt + insecureSkipVerify: false + statusTls: + enable: false + caCert: + certFile: service-ca.crt + name: loki-ca-bundle + type: configmap + insecureSkipVerify: false + userCert: + certFile: tls.crt + certKey: tls.key + name: loki-query-frontend-http + type: secret + batchWait: 1s + batchSize: 10485760 + minBackoff: 1s + maxBackoff: 5s + maxRetries: 2 + consolePlugin: + register: true + imagePullPolicy: IfNotPresent + port: 9001 + logLevel: info + autoscaler: + status: DISABLED + minReplicas: 1 + maxReplicas: 3 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 + portNaming: + enable: true + portNames: + "3100": loki + quickFilters: + - name: Applications + filter: + src_namespace!: 'openshift-,netobserv' + dst_namespace!: 'openshift-,netobserv' + default: true + - name: Infrastructure + filter: + src_namespace: 'openshift-,netobserv' + dst_namespace: 'openshift-,netobserv' + - name: Pods network + filter: + src_kind: 'Pod' + dst_kind: 'Pod' + default: true + - name: Services network + filter: + dst_kind: 'Service' + exporters: [] + # - type: KAFKA + # kafka: + # address: "kafka-cluster-kafka-bootstrap.netobserv" + # topic: netobserv-flows-export + # or + # - type: IPFIX + # ipfix: + # targetHost: "ipfix-collector.ipfix.svc.cluster.local" + # targetPort: 4739 + # transport: TCP or UDP (optional - defaults to TCP) \ No newline at end of file diff --git 
a/controllers/consoleplugin/consoleplugin_objects.go b/controllers/consoleplugin/consoleplugin_objects.go index d8bf6531e..a50d710b0 100644 --- a/controllers/consoleplugin/consoleplugin_objects.go +++ b/controllers/consoleplugin/consoleplugin_objects.go @@ -17,7 +17,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/pkg/helper" "github.com/netobserv/network-observability-operator/pkg/volumes" @@ -175,7 +175,7 @@ func (b *builder) buildArgs(desired *flowslatest.FlowCollectorSpec) []string { "-key", "/var/serving-cert/tls.key", "-loki", querierURL, "-loki-labels", strings.Join(indexFields, ","), - "-loki-tenant-id", desired.Loki.TenantID, + "-loki-tenant-id", desired.Loki.Manual.TenantID, "-loglevel", desired.ConsolePlugin.LogLevel, "-frontend-config", filepath.Join(configPath, configFile), } @@ -188,11 +188,11 @@ func (b *builder) buildArgs(desired *flowslatest.FlowCollectorSpec) []string { args = append(args, "-loki-status", statusURL) } - if desired.Loki.TLS.Enable { - if desired.Loki.TLS.InsecureSkipVerify { + if desired.Loki.Manual.TLS.Enable { + if desired.Loki.Manual.TLS.InsecureSkipVerify { args = append(args, "-loki-skip-tls") } else { - caPath := b.volumes.AddCACertificate(&desired.Loki.TLS, "loki-certs") + caPath := b.volumes.AddCACertificate(&desired.Loki.Manual.TLS, "loki-certs") if caPath != "" { args = append(args, "-loki-ca-path", caPath) } diff --git a/controllers/consoleplugin/consoleplugin_reconciler.go b/controllers/consoleplugin/consoleplugin_reconciler.go index 57a24b685..af90d9bd8 100644 --- a/controllers/consoleplugin/consoleplugin_reconciler.go +++ b/controllers/consoleplugin/consoleplugin_reconciler.go @@ -14,7 +14,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" @@ -111,10 +111,10 @@ func (r *CPReconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowC // Watch for Loki certificates if necessary; we'll ignore in that case the returned digest, as we don't need to restart pods on cert rotation // because certificate is always reloaded from file - if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.TLS, r.Namespace); err != nil { + if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.Manual.TLS, r.Namespace); err != nil { return err } - if _, _, err = r.Watcher.ProcessMTLSCerts(ctx, r.Client, &desired.Spec.Loki.StatusTLS, r.Namespace); err != nil { + if _, _, err = r.Watcher.ProcessMTLSCerts(ctx, r.Client, &desired.Spec.Loki.Manual.StatusTLS, r.Namespace); err != nil { return err } @@ -265,15 +265,15 @@ func pluginNeedsUpdate(plg *osv1alpha1.ConsolePlugin, desired *pluginSpec) bool } func querierURL(loki *flowslatest.FlowCollectorLoki) string { - if loki.QuerierURL != "" { - return loki.QuerierURL + if loki.Manual.QuerierURL != "" { + return loki.Manual.QuerierURL } - return loki.URL 
+ return loki.Manual.URL } func statusURL(loki *flowslatest.FlowCollectorLoki) string { - if loki.StatusURL != "" { - return loki.StatusURL + if loki.Manual.StatusURL != "" { + return loki.Manual.StatusURL } return querierURL(loki) } diff --git a/controllers/consoleplugin/consoleplugin_test.go b/controllers/consoleplugin/consoleplugin_test.go index 1e9d0e212..8235962d4 100644 --- a/controllers/consoleplugin/consoleplugin_test.go +++ b/controllers/consoleplugin/consoleplugin_test.go @@ -11,7 +11,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/pkg/helper" ) @@ -106,7 +106,7 @@ func TestContainerUpdateCheck(t *testing.T) { //equals specs plugin := getPluginConfig() - loki := flowslatest.FlowCollectorLoki{URL: "http://loki:3100/", TenantID: "netobserv"} + loki := flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{URL: "http://loki:3100/", TenantID: "netobserv"}} spec := flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder := newBuilder(testNamespace, testImage, &spec) old := builder.deployment("digest") @@ -151,14 +151,14 @@ func TestContainerUpdateCheck(t *testing.T) { old = new //new loki config - loki = flowslatest.FlowCollectorLoki{URL: "http://loki:3100/", TenantID: "netobserv", TLS: flowslatest.ClientTLS{ + loki = flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{URL: "http://loki:3100/", TenantID: "netobserv", TLS: flowslatest.ClientTLS{ Enable: true, CACert: flowslatest.CertificateReference{ Type: "configmap", Name: "cm-name", CertFile: "ca.crt", }, - }} + }}} spec = flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder = newBuilder(testNamespace, testImage, &spec) new = builder.deployment("digest") @@ -168,7 +168,7 @@ func TestContainerUpdateCheck(t *testing.T) { old = new //new loki cert name - loki.TLS.CACert.Name = "cm-name-2" + loki.Manual.TLS.CACert.Name = "cm-name-2" spec = flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder = newBuilder(testNamespace, testImage, &spec) new = builder.deployment("digest") @@ -178,7 +178,7 @@ func TestContainerUpdateCheck(t *testing.T) { old = new //test again no change - loki.TLS.CACert.Name = "cm-name-2" + loki.Manual.TLS.CACert.Name = "cm-name-2" spec = flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder = newBuilder(testNamespace, testImage, &spec) new = builder.deployment("digest") @@ -188,8 +188,8 @@ func TestContainerUpdateCheck(t *testing.T) { old = new //set status url and enable default tls - loki.StatusURL = "http://loki.status:3100/" - loki.StatusTLS.Enable = true + loki.Manual.StatusURL = "http://loki.status:3100/" + loki.Manual.StatusTLS.Enable = true spec = flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder = newBuilder(testNamespace, testImage, &spec) @@ -200,7 +200,7 @@ func TestContainerUpdateCheck(t *testing.T) { old = new //update status ca cert - loki.StatusTLS.CACert = flowslatest.CertificateReference{ + loki.Manual.StatusTLS.CACert = flowslatest.CertificateReference{ Type: "configmap", Name: "status-cm-name", CertFile: "status-ca.crt", @@ -215,7 +215,7 @@ func TestContainerUpdateCheck(t *testing.T) { old = new //update status user cert - 
loki.StatusTLS.UserCert = flowslatest.CertificateReference{ + loki.Manual.StatusTLS.UserCert = flowslatest.CertificateReference{ Type: "secret", Name: "sec-name", CertFile: "tls.crt", @@ -260,7 +260,7 @@ func TestBuiltService(t *testing.T) { //newly created service should not need update plugin := getPluginConfig() - loki := flowslatest.FlowCollectorLoki{URL: "http://foo:1234"} + loki := flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{URL: "http://foo:1234"}} spec := flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder := newBuilder(testNamespace, testImage, &spec) old := builder.mainService() @@ -274,7 +274,7 @@ func TestLabels(t *testing.T) { assert := assert.New(t) plugin := getPluginConfig() - loki := flowslatest.FlowCollectorLoki{URL: "http://foo:1234"} + loki := flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{URL: "http://foo:1234"}} spec := flowslatest.FlowCollectorSpec{ConsolePlugin: plugin, Loki: loki} builder := newBuilder(testNamespace, testImage, &spec) diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go index 887c1efab..f0e7eed54 100644 --- a/controllers/ebpf/agent_controller.go +++ b/controllers/ebpf/agent_controller.go @@ -14,7 +14,7 @@ import ( "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/ebpf/internal/permissions" "github.com/netobserv/network-observability-operator/controllers/operator" diff --git a/controllers/ebpf/internal/permissions/permissions.go b/controllers/ebpf/internal/permissions/permissions.go index acfa8c721..788e450f5 100644 --- a/controllers/ebpf/internal/permissions/permissions.go +++ b/controllers/ebpf/internal/permissions/permissions.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" diff --git a/controllers/flowcollector_controller.go b/controllers/flowcollector_controller.go index e75262105..c49316dca 100644 --- a/controllers/flowcollector_controller.go +++ b/controllers/flowcollector_controller.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/consoleplugin" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/ebpf" diff --git a/controllers/flowcollector_controller_certificates_test.go b/controllers/flowcollector_controller_certificates_test.go index e74f5e2b2..b1e4001bb 100644 --- a/controllers/flowcollector_controller_certificates_test.go +++ b/controllers/flowcollector_controller_certificates_test.go @@ -9,7 +9,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/types" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" . "github.com/netobserv/network-observability-operator/controllers/controllerstest" "github.com/netobserv/network-observability-operator/controllers/flowlogspipeline" @@ -128,7 +128,7 @@ func flowCollectorCertificatesSpecs() { Agent: flowslatest.FlowCollectorAgent{ Type: "EBPF", }, - Loki: flowslatest.FlowCollectorLoki{ + Loki: flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{ AuthToken: flowslatest.LokiAuthForwardUserToken, TLS: flowslatest.ClientTLS{ Enable: true, @@ -139,7 +139,7 @@ func flowCollectorCertificatesSpecs() { CertFile: "cert.crt", }, }, - }, + }}, Kafka: flowslatest.FlowCollectorKafka{ TLS: flowslatest.ClientTLS{ Enable: true, diff --git a/controllers/flowcollector_controller_console_test.go b/controllers/flowcollector_controller_console_test.go index ce4360e2e..5a9a9cb3f 100644 --- a/controllers/flowcollector_controller_console_test.go +++ b/controllers/flowcollector_controller_console_test.go @@ -14,7 +14,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" . "github.com/netobserv/network-observability-operator/controllers/controllerstest" ) @@ -213,14 +213,14 @@ func flowCollectorConsolePluginSpecs() { }) It("Should update the Loki URL in the Console Plugin if it changes in the Spec", func() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { - fc.Spec.Loki.URL = "http://loki.namespace:8888" + fc.Spec.Loki.Manual.URL = "http://loki.namespace:8888" }) Eventually(getContainerArgumentAfter("netobserv-plugin", "-loki", cpKey), timeout, interval).Should(Equal("http://loki.namespace:8888")) }) It("Should use the Loki Querier URL instead of the Loki URL, if the first is defined", func() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { - fc.Spec.Loki.QuerierURL = "http://loki-querier:6789" + fc.Spec.Loki.Manual.QuerierURL = "http://loki-querier:6789" }) Eventually(getContainerArgumentAfter("netobserv-plugin", "-loki", cpKey), timeout, interval).Should(Equal("http://loki-querier:6789")) diff --git a/controllers/flowcollector_controller_ebpf_test.go b/controllers/flowcollector_controller_ebpf_test.go index e61d6dd22..87cca0f0b 100644 --- a/controllers/flowcollector_controller_ebpf_test.go +++ b/controllers/flowcollector_controller_ebpf_test.go @@ -12,7 +12,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" . 
"github.com/netobserv/network-observability-operator/controllers/controllerstest" "github.com/netobserv/network-observability-operator/pkg/helper" diff --git a/controllers/flowcollector_controller_iso_test.go b/controllers/flowcollector_controller_iso_test.go index 78d9f4bb1..17d9af9ad 100644 --- a/controllers/flowcollector_controller_iso_test.go +++ b/controllers/flowcollector_controller_iso_test.go @@ -11,7 +11,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" ) // nolint:cyclop @@ -113,7 +113,7 @@ func flowCollectorIsoSpecs() { }, QuickFilters: []flowslatest.QuickFilter{}, }, - Loki: flowslatest.FlowCollectorLoki{ + Loki: flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{ URL: "http://loki", QuerierURL: "", StatusURL: "", @@ -158,7 +158,7 @@ func flowCollectorIsoSpecs() { CertKey: "", }, }, - }, + }}, Kafka: flowslatest.FlowCollectorKafka{ Address: "http://kafka", Topic: "topic", diff --git a/controllers/flowcollector_controller_test.go b/controllers/flowcollector_controller_test.go index 716abfe5c..10981a5b9 100644 --- a/controllers/flowcollector_controller_test.go +++ b/controllers/flowcollector_controller_test.go @@ -17,7 +17,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" . "github.com/netobserv/network-observability-operator/controllers/controllerstest" "github.com/netobserv/network-observability-operator/controllers/flowlogspipeline" @@ -338,7 +338,7 @@ func flowCollectorControllerSpecs() { It("Should redeploy if the spec doesn't change but the external flowlogs-pipeline-config does", func() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { - fc.Spec.Loki.MaxRetries = pointer.Int32(7) + fc.Spec.Loki.Manual.MaxRetries = pointer.Int32(7) }) By("Expecting that the flowlogsPipeline.PodConfigurationDigest attribute has changed") @@ -612,7 +612,7 @@ func flowCollectorControllerSpecs() { }, })).Should(Succeed()) UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { - fc.Spec.Loki.TLS = flowslatest.ClientTLS{ + fc.Spec.Loki.Manual.TLS = flowslatest.ClientTLS{ Enable: true, CACert: flowslatest.CertificateReference{ Type: flowslatest.RefTypeConfigMap, @@ -637,7 +637,7 @@ func flowCollectorControllerSpecs() { It("Should restore no TLS config", func() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { - fc.Spec.Loki.TLS = flowslatest.ClientTLS{ + fc.Spec.Loki.Manual.TLS = flowslatest.ClientTLS{ Enable: false, } }) diff --git a/controllers/flowlogspipeline/flp_common_objects.go b/controllers/flowlogspipeline/flp_common_objects.go index 743a3eadb..e92085c25 100644 --- a/controllers/flowlogspipeline/flp_common_objects.go +++ b/controllers/flowlogspipeline/flp_common_objects.go @@ -20,7 +20,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" 
"github.com/netobserv/network-observability-operator/pkg/filters" @@ -440,20 +440,20 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) (*corev // loki stage (write) configuration lokiWrite := api.WriteLoki{ Labels: indexFields, - BatchSize: int(b.desired.Loki.BatchSize), - BatchWait: b.desired.Loki.BatchWait.ToUnstructured().(string), - MaxBackoff: b.desired.Loki.MaxBackoff.ToUnstructured().(string), - MaxRetries: int(helper.PtrInt32(b.desired.Loki.MaxRetries)), - MinBackoff: b.desired.Loki.MinBackoff.ToUnstructured().(string), + BatchSize: int(b.desired.Loki.Manual.BatchSize), + BatchWait: b.desired.Loki.Manual.BatchWait.ToUnstructured().(string), + MaxBackoff: b.desired.Loki.Manual.MaxBackoff.ToUnstructured().(string), + MaxRetries: int(helper.PtrInt32(b.desired.Loki.Manual.MaxRetries)), + MinBackoff: b.desired.Loki.Manual.MinBackoff.ToUnstructured().(string), StaticLabels: model.LabelSet{}, - Timeout: b.desired.Loki.Timeout.ToUnstructured().(string), - URL: b.desired.Loki.URL, + Timeout: b.desired.Loki.Manual.Timeout.ToUnstructured().(string), + URL: b.desired.Loki.Manual.URL, TimestampLabel: "TimeFlowEndMs", TimestampScale: "1ms", - TenantID: b.desired.Loki.TenantID, + TenantID: b.desired.Loki.Manual.TenantID, } - for k, v := range b.desired.Loki.StaticLabels { + for k, v := range b.desired.Loki.Manual.StaticLabels { lokiWrite.StaticLabels[model.LabelName(k)] = model.LabelValue(v) } @@ -466,8 +466,8 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) (*corev } } - if b.desired.Loki.TLS.Enable { - if b.desired.Loki.TLS.InsecureSkipVerify { + if b.desired.Loki.Manual.TLS.Enable { + if b.desired.Loki.Manual.TLS.InsecureSkipVerify { lokiWrite.ClientConfig = &promConfig.HTTPClientConfig{ Authorization: authorization, TLSConfig: promConfig.TLSConfig{ @@ -475,7 +475,7 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) (*corev }, } } else { - caPath := b.volumes.AddCACertificate(&b.desired.Loki.TLS, "loki-certs") + caPath := b.volumes.AddCACertificate(&b.desired.Loki.Manual.TLS, "loki-certs") lokiWrite.ClientConfig = &promConfig.HTTPClientConfig{ Authorization: authorization, TLSConfig: promConfig.TLSConfig{ diff --git a/controllers/flowlogspipeline/flp_ingest_objects.go b/controllers/flowlogspipeline/flp_ingest_objects.go index 45fd09eb1..3e9efe24a 100644 --- a/controllers/flowlogspipeline/flp_ingest_objects.go +++ b/controllers/flowlogspipeline/flp_ingest_objects.go @@ -8,7 +8,7 @@ import ( "github.com/netobserv/flowlogs-pipeline/pkg/api" "github.com/netobserv/flowlogs-pipeline/pkg/config" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) diff --git a/controllers/flowlogspipeline/flp_ingest_reconciler.go b/controllers/flowlogspipeline/flp_ingest_reconciler.go index dbc714f05..2a91c66cd 100644 --- a/controllers/flowlogspipeline/flp_ingest_reconciler.go +++ b/controllers/flowlogspipeline/flp_ingest_reconciler.go @@ -10,7 +10,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" 
"github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" diff --git a/controllers/flowlogspipeline/flp_monolith_objects.go b/controllers/flowlogspipeline/flp_monolith_objects.go index 50b6a3d0b..c8a926029 100644 --- a/controllers/flowlogspipeline/flp_monolith_objects.go +++ b/controllers/flowlogspipeline/flp_monolith_objects.go @@ -8,7 +8,7 @@ import ( "github.com/netobserv/flowlogs-pipeline/pkg/api" "github.com/netobserv/flowlogs-pipeline/pkg/config" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) diff --git a/controllers/flowlogspipeline/flp_monolith_reconciler.go b/controllers/flowlogspipeline/flp_monolith_reconciler.go index f550eeff9..96b200195 100644 --- a/controllers/flowlogspipeline/flp_monolith_reconciler.go +++ b/controllers/flowlogspipeline/flp_monolith_reconciler.go @@ -10,7 +10,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" @@ -122,7 +122,7 @@ func (r *flpMonolithReconciler) reconcile(ctx context.Context, desired *flowslat // Watch for Loki certificate if necessary; we'll ignore in that case the returned digest, as we don't need to restart pods on cert rotation // because certificate is always reloaded from file - if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.TLS, r.Namespace); err != nil { + if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.Manual.TLS, r.Namespace); err != nil { return err } diff --git a/controllers/flowlogspipeline/flp_reconciler.go b/controllers/flowlogspipeline/flp_reconciler.go index 3d2fd64bf..5917f1c65 100644 --- a/controllers/flowlogspipeline/flp_reconciler.go +++ b/controllers/flowlogspipeline/flp_reconciler.go @@ -10,7 +10,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" "github.com/netobserv/network-observability-operator/pkg/watchers" diff --git a/controllers/flowlogspipeline/flp_test.go b/controllers/flowlogspipeline/flp_test.go index 1e9fc761f..b861f109e 100644 --- a/controllers/flowlogspipeline/flp_test.go +++ b/controllers/flowlogspipeline/flp_test.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/pointer" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" 
"github.com/netobserv/network-observability-operator/pkg/helper" @@ -98,7 +98,7 @@ func getConfig() flowslatest.FlowCollectorSpec { Duration: conntrackTerminatingTimeout, }, }, - Loki: flowslatest.FlowCollectorLoki{ + Loki: flowslatest.FlowCollectorLoki{Manual: flowslatest.LokiManualParams{ URL: "http://loki:3100/", BatchWait: metav1.Duration{ Duration: 1, @@ -112,7 +112,7 @@ func getConfig() flowslatest.FlowCollectorSpec { }, MaxRetries: pointer.Int32(10), StaticLabels: map[string]string{"app": "netobserv-flowcollector"}, - }, + }}, Kafka: flowslatest.FlowCollectorKafka{ Address: "kafka", Topic: "flp", @@ -278,7 +278,7 @@ func TestDaemonSetChanged(t *testing.T) { assert.Contains(report.String(), "no change") // Check Loki config change - cfg.Loki.TLS = flowslatest.ClientTLS{ + cfg.Loki.Manual.TLS = flowslatest.ClientTLS{ Enable: true, CACert: flowslatest.CertificateReference{ Type: "configmap", @@ -296,7 +296,7 @@ func TestDaemonSetChanged(t *testing.T) { assert.Contains(report.String(), "config-digest") // Check volumes change - cfg.Loki.TLS = flowslatest.ClientTLS{ + cfg.Loki.Manual.TLS = flowslatest.ClientTLS{ Enable: true, CACert: flowslatest.CertificateReference{ Type: "configmap", @@ -623,12 +623,12 @@ func TestConfigMapShouldDeserializeAsJSON(t *testing.T) { assert.Equal(cfg.Processor.Port, int32(params[0].Ingest.Collector.Port)) lokiCfg := params[3].Write.Loki - assert.Equal(loki.URL, lokiCfg.URL) - assert.Equal(loki.BatchWait.Duration.String(), lokiCfg.BatchWait) - assert.Equal(loki.MinBackoff.Duration.String(), lokiCfg.MinBackoff) - assert.Equal(loki.MaxBackoff.Duration.String(), lokiCfg.MaxBackoff) - assert.EqualValues(*loki.MaxRetries, lokiCfg.MaxRetries) - assert.EqualValues(loki.BatchSize, lokiCfg.BatchSize) + assert.Equal(loki.Manual.URL, lokiCfg.URL) + assert.Equal(loki.Manual.BatchWait.Duration.String(), lokiCfg.BatchWait) + assert.Equal(loki.Manual.MinBackoff.Duration.String(), lokiCfg.MinBackoff) + assert.Equal(loki.Manual.MaxBackoff.Duration.String(), lokiCfg.MaxBackoff) + assert.EqualValues(*loki.Manual.MaxRetries, lokiCfg.MaxRetries) + assert.EqualValues(loki.Manual.BatchSize, lokiCfg.BatchSize) assert.EqualValues([]string{"SrcK8S_Namespace", "SrcK8S_OwnerName", "DstK8S_Namespace", "DstK8S_OwnerName", "FlowDirection", "_RecordType"}, lokiCfg.Labels) assert.Equal(`{app="netobserv-flowcollector"}`, fmt.Sprintf("%v", lokiCfg.StaticLabels)) diff --git a/controllers/flowlogspipeline/flp_transfo_objects.go b/controllers/flowlogspipeline/flp_transfo_objects.go index d9ae34735..312320413 100644 --- a/controllers/flowlogspipeline/flp_transfo_objects.go +++ b/controllers/flowlogspipeline/flp_transfo_objects.go @@ -9,7 +9,7 @@ import ( "github.com/netobserv/flowlogs-pipeline/pkg/api" "github.com/netobserv/flowlogs-pipeline/pkg/config" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) diff --git a/controllers/flowlogspipeline/flp_transfo_reconciler.go b/controllers/flowlogspipeline/flp_transfo_reconciler.go index 6d8e812e4..a0b27d11a 100644 --- a/controllers/flowlogspipeline/flp_transfo_reconciler.go +++ b/controllers/flowlogspipeline/flp_transfo_reconciler.go @@ -11,7 +11,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest 
"github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" @@ -122,7 +122,7 @@ func (r *flpTransformerReconciler) reconcile(ctx context.Context, desired *flows // Watch for Loki certificate if necessary; we'll ignore in that case the returned digest, as we don't need to restart pods on cert rotation // because certificate is always reloaded from file - if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.TLS, r.Namespace); err != nil { + if _, err = r.Watcher.ProcessCACert(ctx, r.Client, &desired.Spec.Loki.Manual.TLS, r.Namespace); err != nil { return err } diff --git a/controllers/ovs/flowsconfig_cno_reconciler.go b/controllers/ovs/flowsconfig_cno_reconciler.go index 4423f8278..1916372d9 100644 --- a/controllers/ovs/flowsconfig_cno_reconciler.go +++ b/controllers/ovs/flowsconfig_cno_reconciler.go @@ -10,7 +10,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) diff --git a/controllers/ovs/flowsconfig_ovnk_reconciler.go b/controllers/ovs/flowsconfig_ovnk_reconciler.go index f67ff67d0..926aed71c 100644 --- a/controllers/ovs/flowsconfig_ovnk_reconciler.go +++ b/controllers/ovs/flowsconfig_ovnk_reconciler.go @@ -13,7 +13,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/helper" ) diff --git a/controllers/ovs/flowsconfig_types.go b/controllers/ovs/flowsconfig_types.go index 5a53ef2fe..b84a20957 100644 --- a/controllers/ovs/flowsconfig_types.go +++ b/controllers/ovs/flowsconfig_types.go @@ -8,7 +8,7 @@ import ( "github.com/mitchellh/mapstructure" "sigs.k8s.io/controller-runtime/pkg/log" - flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1" + flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" ) type flowsConfig struct { diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 2786b986e..43810db45 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -44,6 +44,7 @@ import ( flowsv1alpha1 "github.com/netobserv/network-observability-operator/api/v1alpha1" flowsv1beta1 "github.com/netobserv/network-observability-operator/api/v1beta1" + flowsv1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/operator" //+kubebuilder:scaffold:imports ) @@ -107,6 +108,9 @@ var _ = BeforeSuite(func() { err = flowsv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = flowsv1beta2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = corev1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) diff --git 
a/docs/FlowCollector.md b/docs/FlowCollector.md index 0d5b244d1..894feeaf9 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -4,6 +4,7 @@ Packages: - [flows.netobserv.io/v1alpha1](#flowsnetobserviov1alpha1) - [flows.netobserv.io/v1beta1](#flowsnetobserviov1beta1) +- [flows.netobserv.io/v1beta2](#flowsnetobserviov1beta2) # flows.netobserv.io/v1alpha1 @@ -7882,6 +7883,4149 @@ FlowCollectorStatus defines the observed state of FlowCollector +Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // other fields } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Name | Type | Description | Required |
+|------|------|-------------|----------|
+| lastTransitionTime | string | lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.<br/><i>Format</i>: date-time | true |
+| message | string | message is a human readable message indicating details about the transition. This may be an empty string. | true |
+| reason | string | reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. | true |
+| status | enum | status of the condition, one of True, False, Unknown.<br/><i>Enum</i>: True, False, Unknown | true |
+| type | string | type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) | true |
+| observedGeneration | integer | observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.<br/><i>Format</i>: int64<br/><i>Minimum</i>: 0 | false |
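+
+For illustration, a hedged sketch of how such a condition could look on a FlowCollector status; the condition type `Ready` and reason `ReconcileSucceeded` are hypothetical placeholders, not names defined by this operator:
+
+```yaml
+status:
+  conditions:
+    - type: Ready                         # hypothetical condition type
+      status: "True"
+      reason: ReconcileSucceeded          # hypothetical CamelCase reason
+      message: "All components deployed"  # human-readable, may be empty
+      lastTransitionTime: "2023-06-16T16:30:30Z"
+      observedGeneration: 12              # matches .metadata.generation when up to date
+```
+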
+
+# flows.netobserv.io/v1beta2
+
+Resource Types:
+
+- [FlowCollector](#flowcollector)
+
+## FlowCollector
+[↩ Parent](#flowsnetobserviov1beta2)
+
+FlowCollector is the schema for the network flows collection API, which pilots and configures the underlying deployments.
+
+| Name | Type | Description | Required |
+|------|------|-------------|----------|
+| apiVersion | string | flows.netobserv.io/v1beta2 | true |
+| kind | string | FlowCollector | true |
+| metadata | object | Refer to the Kubernetes API documentation for the fields of the `metadata` field. | true |
+| spec | object | FlowCollectorSpec defines the desired state of FlowCollector.<br/><br/>*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It may have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers may provide some support for these features as a best effort only. | false |
+| status | object | FlowCollectorStatus defines the observed state of FlowCollector | false |
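+
+As a quick orientation, a minimal v1beta2 resource needs only the two required fields of `spec`; the following sketch mirrors the sample CR shipped with this patch:
+
+```yaml
+apiVersion: flows.netobserv.io/v1beta2
+kind: FlowCollector
+metadata:
+  name: cluster
+spec:
+  deploymentModel: DIRECT  # flows go straight from agents to the processor
+  agent:
+    type: EBPF
+```
+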
+
+### FlowCollector.spec
+[↩ Parent](#flowcollector-1)
+
+FlowCollectorSpec defines the desired state of FlowCollector.
+
+*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It may have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers may provide some support for these features as a best effort only.
+
+| Name | Type | Description | Required |
+|------|------|-------------|----------|
+| agent | object | agent for flows extraction.<br/><i>Default</i>: map[type:EBPF] | true |
+| deploymentModel | enum | deploymentModel defines the desired type of deployment for flow processing. Possible values are "DIRECT" (default) to make the flow processor listen directly from the agents, or "KAFKA" to send flows to a Kafka pipeline before consumption by the processor. Kafka can provide better scalability, resiliency and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).<br/><i>Enum</i>: DIRECT, KAFKA<br/><i>Default</i>: DIRECT | true |
+| consolePlugin | object | consolePlugin defines the settings related to the OpenShift Console plugin, when available. | false |
+| exporters | []object | exporters define additional optional exporters for custom consumption or storage. | false |
+| kafka | object | kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the "spec.deploymentModel" is "KAFKA". | false |
+| loki | object | loki, the flow store, client settings. | false |
+| namespace | string | namespace where NetObserv pods are deployed. If empty, the namespace of the operator is going to be used. | false |
+| processor | object | processor defines the settings of the component that receives the flows from the agent, enriches them, and forwards them to the Loki persistence layer. | false |
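+
+The headline change in v1beta2 is the reworked `loki` section: connection settings now sit under a `manual` block selected by a `mode` field. A sketch taken from the sample CR in this patch (the URL is environment-specific):
+
+```yaml
+spec:
+  loki:
+    mode: manual                              # selects the manual parameters below
+    manual:
+      url: 'http://loki.netobserv.svc:3100/'  # Loki push endpoint
+      tls:
+        enable: false
+```
+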
+
+### FlowCollector.spec.agent
+[↩ Parent](#flowcollectorspec-1)
+
+agent for flows extraction.
+
+| Name | Type | Description | Required |
+|------|------|-------------|----------|
+| type | enum | type selects the flows tracing agent. Possible values are "EBPF" (default) to use the NetObserv eBPF agent, or "IPFIX" - deprecated (*) - to use the legacy IPFIX collector. "EBPF" is recommended in most cases as it offers better performance and should work regardless of the CNI installed on the cluster. "IPFIX" works with the OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).<br/><i>Enum</i>: EBPF, IPFIX<br/><i>Default</i>: EBPF | true |
+| ebpf | object | ebpf describes the settings related to the eBPF-based flow reporter when the "agent.type" property is set to "EBPF". | false |
+| ipfix | object | ipfix - deprecated (*) - describes the settings related to the IPFIX-based flow reporter when the "agent.type" property is set to "IPFIX". | false |
+
+### FlowCollector.spec.agent.ebpf
+[↩ Parent](#flowcollectorspecagent-1)
+
+ebpf describes the settings related to the eBPF-based flow reporter when the "agent.type" property is set to "EBPF".
+
+| Name | Type | Description | Required |
+|------|------|-------------|----------|
+| cacheActiveTimeout | string | cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.<br/><i>Default</i>: 5s | false |
+| cacheMaxFlows | integer | cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.<br/><i>Format</i>: int32<br/><i>Default</i>: 100000<br/><i>Minimum</i>: 1 | false |
+| debug | object | Debug allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively at debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk. | false |
+| excludeInterfaces | []string | excludeInterfaces contains the interface names that will be excluded from flow tracing. If an entry is enclosed by slashes (such as `/br-/`), it will match as a regular expression, otherwise it will be matched as a case-sensitive string.<br/><i>Default</i>: [lo] | false |
+| imagePullPolicy | enum | imagePullPolicy is the Kubernetes pull policy for the image defined above<br/><i>Enum</i>: IfNotPresent, Always, Never<br/><i>Default</i>: IfNotPresent | false |
+| interfaces | []string | interfaces contains the interface names from where flows will be collected. If empty, the agent will fetch all the interfaces in the system, except the ones listed in ExcludeInterfaces. If an entry is enclosed by slashes (such as `/br-/`), it will match as a regular expression, otherwise it will be matched as a case-sensitive string. | false |
+| kafkaBatchSize | integer | kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB.<br/><i>Default</i>: 10485760 | false |
+| logLevel | enum | logLevel defines the log level for the NetObserv eBPF Agent<br/><i>Enum</i>: trace, debug, info, warn, error, fatal, panic<br/><i>Default</i>: info | false |
+| privileged | boolean | privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these capabilities cannot be set (for example an old kernel version not knowing CAP_BPF), then you can turn on this mode for more global privileges. | false |
+| resources | object | resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/<br/><i>Default</i>: map[limits:map[memory:800Mi] requests:map[cpu:100m memory:50Mi]] | false |
+| sampling | integer | sampling rate of the flow reporter. 100 means one flow out of 100 is sent. 0 or 1 means all flows are sampled.<br/><i>Format</i>: int32<br/><i>Default</i>: 50<br/><i>Minimum</i>: 0 | false |
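+
+A sketch of typical eBPF agent tuning, reusing the values from this patch's sample CR; as described above, raising `cacheMaxFlows` and `cacheActiveTimeout` trades memory and latency for lower network and CPU overhead:
+
+```yaml
+spec:
+  agent:
+    type: EBPF
+    ebpf:
+      sampling: 50               # 1 flow out of 50 is reported
+      cacheActiveTimeout: 5s     # flush aggregates at least every 5s
+      cacheMaxFlows: 100000      # ...or as soon as 100k flows are cached
+      excludeInterfaces: ["lo"]  # plain string match; use /slashes/ for regex
+```
+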
+
+### FlowCollector.spec.agent.ebpf.debug
+[↩ Parent](#flowcollectorspecagentebpf-1)
+
+Debug allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively at debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk.
+
+| Name | Type | Description | Required |
+|------|------|-------------|----------|
+| env | map[string]string | env allows passing custom environment variables to the NetObserv Agent. Useful for passing some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug and support scenarios. | false |
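+
+For example, a support scenario could tune the agent's garbage collection through this escape hatch; the `GOGC` value below is purely illustrative:
+
+```yaml
+spec:
+  agent:
+    ebpf:
+      debug:
+        env:
+          GOGC: "75"  # illustrative value; set only in debug/support scenarios
+```
+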
+
+### FlowCollector.spec.agent.ebpf.resources
+[↩ Parent](#flowcollectorspecagentebpf-1)
+
+resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+
+| Name | Type | Description | Required |
+|------|------|-------------|----------|
+| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
+| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
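+
+The default shown in the parent table expands to the following, should you need to override it explicitly:
+
+```yaml
+spec:
+  agent:
+    ebpf:
+      resources:
+        requests:
+          cpu: 100m
+          memory: 50Mi
+        limits:
+          memory: 800Mi
+```
+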
+
+### FlowCollector.spec.agent.ipfix
+[↩ Parent](#flowcollectorspecagent-1)
+
+ipfix - deprecated (*) - describes the settings related to the IPFIX-based flow reporter when the "agent.type" property is set to "IPFIX".
+
NameTypeDescriptionRequired
cacheActiveTimeoutstring + cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending
+
+ Default: 20s
+
false
cacheMaxFlowsinteger + cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows
+
+ Format: int32
+ Default: 400
+ Minimum: 0
+
false
clusterNetworkOperatorobject + clusterNetworkOperator defines the settings related to the OpenShift Cluster Network Operator, when available.
+
false
forceSampleAllboolean + forceSampleAll allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to true. Use at your own risk. When it is set to true, the value of "sampling" is ignored.
+
+ Default: false
+
false
ovnKubernetesobject + ovnKubernetes defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
+
false
samplinginteger + sampling is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to "forceSampleAll". Alternatively, you can use the eBPF Agent instead of IPFIX.
+
+ Format: int32
+ Default: 400
+ Minimum: 2
+
false
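+
+A sketch of the deprecated IPFIX reporter configuration, using the defaults listed above:
+
+```yaml
+spec:
+  agent:
+    type: IPFIX
+    ipfix:
+      sampling: 400            # cannot be set below 2
+      cacheActiveTimeout: 20s
+      cacheMaxFlows: 400
+      forceSampleAll: false    # keep sampling enabled for cluster stability
+```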
+
+### FlowCollector.spec.agent.ipfix.clusterNetworkOperator
+[↩ Parent](#flowcollectorspecagentipfix-1)
+
+clusterNetworkOperator defines the settings related to the OpenShift Cluster Network Operator, when available.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| namespace | string | namespace where the config map is going to be deployed.<br>Default: openshift-network-operator | false |
+
+### FlowCollector.spec.agent.ipfix.ovnKubernetes
+[↩ Parent](#flowcollectorspecagentipfix-1)
+
+ovnKubernetes defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| containerName | string | containerName defines the name of the container to configure for IPFIX.<br>Default: ovnkube-node | false |
+| daemonSetName | string | daemonSetName defines the name of the DaemonSet controlling the OVN-Kubernetes pods.<br>Default: ovnkube-node | false |
+| namespace | string | namespace where OVN-Kubernetes pods are deployed.<br>Default: ovn-kubernetes | false |
+
+### FlowCollector.spec.consolePlugin
+[↩ Parent](#flowcollectorspec-1)
+
+consolePlugin defines the settings related to the OpenShift Console plugin, when available.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| autoscaler | object | autoscaler spec of a horizontal pod autoscaler to set up for the plugin Deployment. | false |
+| imagePullPolicy | enum | imagePullPolicy is the Kubernetes pull policy for the image defined above.<br>Enum: IfNotPresent, Always, Never<br>Default: IfNotPresent | false |
+| logLevel | enum | logLevel for the console plugin backend.<br>Enum: trace, debug, info, warn, error, fatal, panic<br>Default: info | false |
+| port | integer | port is the plugin service port. Do not use 9002, which is reserved for metrics.<br>Format: int32<br>Default: 9001<br>Minimum: 1<br>Maximum: 65535 | false |
+| portNaming | object | portNaming defines the configuration of the port-to-service name translation.<br>Default: map[enable:true] | false |
+| quickFilters | []object | quickFilters configures quick filter presets for the Console plugin.<br>Default: [map[default:true filter:map[dst_namespace!:openshift-,netobserv src_namespace!:openshift-,netobserv] name:Applications] map[filter:map[dst_namespace:openshift-,netobserv src_namespace:openshift-,netobserv] name:Infrastructure] map[default:true filter:map[dst_kind:Pod src_kind:Pod] name:Pods network] map[filter:map[dst_kind:Service] name:Services network]] | false |
+| register | boolean | register allows, when set to true, to automatically register the provided console plugin with the OpenShift Console operator. When set to false, you can still register it manually by editing console.operator.openshift.io/cluster, e.g.: oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]'<br>Default: true | false |
+| replicas | integer | replicas defines the number of replicas (pods) to start.<br>Format: int32<br>Default: 1<br>Minimum: 0 | false |
+| resources | object | resources, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/<br>Default: map[limits:map[memory:100Mi] requests:map[cpu:100m memory:50Mi]] | false |
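+
+A sketch of the console plugin section; values mirror the defaults documented above, with an illustrative port name entry:
+
+```yaml
+spec:
+  consolePlugin:
+    register: true
+    replicas: 1
+    port: 9001
+    logLevel: info
+    portNaming:
+      enable: true
+      portNames:
+        "3100": loki    # illustrative extra port-to-service mapping
+```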
+
+### FlowCollector.spec.consolePlugin.autoscaler
+[↩ Parent](#flowcollectorspecconsoleplugin-1)
+
+autoscaler spec of a horizontal pod autoscaler to set up for the plugin Deployment.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| maxReplicas | integer | maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than minReplicas.<br>Format: int32<br>Default: 3 | false |
+| metrics | []object | metrics used by the pod autoscaler. | false |
+| minReplicas | integer | minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.<br>Format: int32 | false |
+| status | enum | status describes the desired status regarding deploying a horizontal pod autoscaler. DISABLED does not deploy a horizontal pod autoscaler. ENABLED deploys a horizontal pod autoscaler.<br>Enum: DISABLED, ENABLED<br>Default: DISABLED | false |
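+
+A sketch of an autoscaler spec with a single CPU utilization metric, following the upstream autoscaling/v2 MetricSpec shape; the utilization target is illustrative:
+
+```yaml
+spec:
+  consolePlugin:
+    autoscaler:
+      status: ENABLED
+      minReplicas: 1
+      maxReplicas: 3
+      metrics:
+        - type: Resource
+          resource:
+            name: cpu
+            target:
+              type: Utilization
+              averageUtilization: 50
+```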
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index]
+[↩ Parent](#flowcollectorspecconsolepluginautoscaler-1)
+
+MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | string | type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: the "ContainerResource" type is available only when the HPAContainerMetrics feature gate is enabled. | true |
+| containerResource | object | containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. | false |
+| external | object | external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of the cluster (for example, the length of a queue in a cloud messaging service, or QPS from a load balancer running outside of the cluster). | false |
+| object | object | object refers to a metric describing a single Kubernetes object (for example, hits-per-second on an Ingress object). | false |
+| pods | object | pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values are averaged together before being compared to the target value. | false |
+| resource | object | resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].containerResource
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1)
+
+containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| container | string | container is the name of the container in the pods of the scaling target. | true |
+| name | string | name is the name of the resource in question. | true |
+| target | object | target specifies the target value for the given metric. | true |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].containerResource.target
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexcontainerresource-1)
+
+target specifies the target value for the given metric.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | string | type represents whether the metric type is Utilization, Value, or AverageValue. | true |
+| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for the Resource metric source type.<br>Format: int32 | false |
+| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity). | false |
+| value | int or string | value is the target value of the metric (as a quantity). | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1)
+
+external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of the cluster (for example, the length of a queue in a cloud messaging service, or QPS from a load balancer running outside of the cluster).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| metric | object | metric identifies the target metric by name and selector. | true |
+| target | object | target specifies the target value for the given metric. | true |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.metric
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternal-1)
+
+metric identifies the target metric by name and selector.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| name | string | name is the name of the given metric. | true |
+| selector | object | selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName is used to gather metrics. | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.metric.selector
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternalmetric-1)
+
+selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName is used to gather metrics.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
+| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.metric.selector.matchExpressions[index]
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternalmetricselector-1)
+
+A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| key | string | key is the label key that the selector applies to. | true |
+| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
+| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.target
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternal-1)
+
+target specifies the target value for the given metric.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | string | type represents whether the metric type is Utilization, Value, or AverageValue. | true |
+| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for the Resource metric source type.<br>Format: int32 | false |
+| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity). | false |
+| value | int or string | value is the target value of the metric (as a quantity). | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1)
+
+object refers to a metric describing a single Kubernetes object (for example, hits-per-second on an Ingress object).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| describedObject | object | describedObject specifies the description of the referenced object, such as kind, name, and apiVersion. | true |
+| metric | object | metric identifies the target metric by name and selector. | true |
+| target | object | target specifies the target value for the given metric. | true |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.describedObject
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject-1)
+
+describedObject specifies the description of the referenced object, such as kind, name, and apiVersion.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| kind | string | Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | true |
+| name | string | Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names | true |
+| apiVersion | string | API version of the referent. | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.metric
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject-1)
+
+metric identifies the target metric by name and selector.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| name | string | name is the name of the given metric. | true |
+| selector | object | selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName is used to gather metrics. | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.metric.selector
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobjectmetric-1)
+
+selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName is used to gather metrics.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
+| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.metric.selector.matchExpressions[index]
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobjectmetricselector-1)
+
+A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| key | string | key is the label key that the selector applies to. | true |
+| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
+| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.target
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject-1)
+
+target specifies the target value for the given metric.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | string | type represents whether the metric type is Utilization, Value, or AverageValue. | true |
+| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for the Resource metric source type.<br>Format: int32 | false |
+| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity). | false |
+| value | int or string | value is the target value of the metric (as a quantity). | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1)
+
+pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values are averaged together before being compared to the target value.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| metric | object | metric identifies the target metric by name and selector. | true |
+| target | object | target specifies the target value for the given metric. | true |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.metric
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpods-1)
+
+metric identifies the target metric by name and selector.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| name | string | name is the name of the given metric. | true |
+| selector | object | selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName is used to gather metrics. | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.metric.selector
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpodsmetric-1)
+
+selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName is used to gather metrics.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
+| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.metric.selector.matchExpressions[index]
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpodsmetricselector-1)
+
+A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| key | string | key is the label key that the selector applies to. | true |
+| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
+| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.target
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpods-1)
+
+target specifies the target value for the given metric.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | string | type represents whether the metric type is Utilization, Value, or AverageValue. | true |
+| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for the Resource metric source type.<br>Format: int32 | false |
+| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity). | false |
+| value | int or string | value is the target value of the metric (as a quantity). | false |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].resource
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1)
+
+resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| name | string | name is the name of the resource in question. | true |
+| target | object | target specifies the target value for the given metric. | true |
+
+### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].resource.target
+[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexresource-1)
+
+target specifies the target value for the given metric.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | string | type represents whether the metric type is Utilization, Value, or AverageValue. | true |
+| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for the Resource metric source type.<br>Format: int32 | false |
+| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity). | false |
+| value | int or string | value is the target value of the metric (as a quantity). | false |
+
+### FlowCollector.spec.consolePlugin.portNaming
+[↩ Parent](#flowcollectorspecconsoleplugin-1)
+
+portNaming defines the configuration of the port-to-service name translation.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| enable | boolean | enable the console plugin port-to-service name translation.<br>Default: true | false |
+| portNames | map[string]string | portNames defines additional port names to use in the console. Example: portNames: {"3100": "loki"} | false |
+
+### FlowCollector.spec.consolePlugin.quickFilters[index]
+[↩ Parent](#flowcollectorspecconsoleplugin-1)
+
+QuickFilter defines preset configuration for Console's quick filters.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| filter | map[string]string | filter is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a comma-separated string. Example: filter: {"src_namespace": "namespace1,namespace2"} | true |
+| name | string | name of the filter, that is displayed in Console. | true |
+| default | boolean | default defines whether this filter should be active by default or not. | false |
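+
+A sketch of a quick filter preset, equivalent to the default "Applications" filter shown above:
+
+```yaml
+spec:
+  consolePlugin:
+    quickFilters:
+      - name: Applications
+        default: true
+        filter:
+          src_namespace!: "openshift-,netobserv"
+          dst_namespace!: "openshift-,netobserv"
+```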
+
+### FlowCollector.spec.consolePlugin.resources
+[↩ Parent](#flowcollectorspecconsoleplugin-1)
+
+resources, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
+| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
+
+### FlowCollector.spec.exporters[index]
+[↩ Parent](#flowcollectorspec-1)
+
+FlowCollectorExporter defines an additional exporter to send enriched flows to.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | enum | type selects the type of exporter. The available options are "KAFKA" and "IPFIX". "IPFIX" is unsupported (*).<br>Enum: KAFKA, IPFIX | true |
+| ipfix | object | IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. Unsupported (*). | false |
+| kafka | object | kafka configuration, such as the address and topic, to send enriched flows to. | false |
+
+### FlowCollector.spec.exporters[index].ipfix
+[↩ Parent](#flowcollectorspecexportersindex-1)
+
+IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. Unsupported (*).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| targetHost | string | address of the IPFIX external receiver.<br>Default: | true |
+| targetPort | integer | port for the IPFIX external receiver. | true |
+| transport | enum | transport protocol (TCP/UDP) to be used for the IPFIX connection; defaults to TCP.<br>Enum: TCP, UDP | false |
+
+### FlowCollector.spec.exporters[index].kafka
+[↩ Parent](#flowcollectorspecexportersindex-1)
+
+kafka configuration, such as the address and topic, to send enriched flows to.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| address | string | address of the Kafka server.<br>Default: | true |
+| topic | string | kafka topic to use. It must exist; NetObserv does not create it.<br>Default: | true |
+| tls | object | tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, the Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged). | false |
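+
+A sketch of a Kafka exporter entry; the broker address and topic are illustrative and must point to an existing topic:
+
+```yaml
+spec:
+  exporters:
+    - type: KAFKA
+      kafka:
+        address: kafka-cluster-kafka-bootstrap.netobserv:9092    # illustrative broker address
+        topic: netobserv-flows-export                            # must already exist
+```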
+
+### FlowCollector.spec.exporters[index].kafka.tls
+[↩ Parent](#flowcollectorspecexportersindexkafka-1)
+
+tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, the Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| caCert | object | caCert defines the reference of the certificate for the Certificate Authority. | false |
+| enable | boolean | enable TLS.<br>Default: false | false |
+| insecureSkipVerify | boolean | insecureSkipVerify allows skipping client-side verification of the server certificate. If set to true, the caCert field is ignored.<br>Default: false | false |
+| userCert | object | userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS). | false |
+
+### FlowCollector.spec.exporters[index].kafka.tls.caCert
+[↩ Parent](#flowcollectorspecexportersindexkafkatls-1)
+
+caCert defines the reference of the certificate for the Certificate Authority.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| certFile | string | certFile defines the path to the certificate file name within the config map or secret. | false |
+| certKey | string | certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| name | string | name of the config map or secret containing certificates. | false |
+| namespace | string | namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.<br>Default: | false |
+| type | enum | type for the certificate reference: "configmap" or "secret".<br>Enum: configmap, secret | false |
+
+### FlowCollector.spec.exporters[index].kafka.tls.userCert
+[↩ Parent](#flowcollectorspecexportersindexkafkatls-1)
+
+userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| certFile | string | certFile defines the path to the certificate file name within the config map or secret. | false |
+| certKey | string | certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| name | string | name of the config map or secret containing certificates. | false |
+| namespace | string | namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.<br>Default: | false |
+| type | enum | type for the certificate reference: "configmap" or "secret".<br>Enum: configmap, secret | false |
+
+### FlowCollector.spec.kafka
+[↩ Parent](#flowcollectorspec-1)
+
+kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the "spec.deploymentModel" is "KAFKA".
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| address | string | address of the Kafka server.<br>Default: | true |
+| topic | string | kafka topic to use. It must exist; NetObserv does not create it.<br>Default: | true |
+| tls | object | tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, the Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged). | false |
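+
+A sketch of the Kafka deployment model with TLS enabled; the address, topic and secret name are illustrative:
+
+```yaml
+spec:
+  deploymentModel: KAFKA
+  kafka:
+    address: kafka-cluster-kafka-bootstrap.netobserv:9093    # TLS port, illustrative
+    topic: network-flows                                     # must already exist
+    tls:
+      enable: true
+      caCert:
+        type: secret
+        name: kafka-cluster-ca-cert    # illustrative secret name
+        certFile: ca.crt
+```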
+
+### FlowCollector.spec.kafka.tls
+[↩ Parent](#flowcollectorspeckafka-1)
+
+tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, the Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| caCert | object | caCert defines the reference of the certificate for the Certificate Authority. | false |
+| enable | boolean | enable TLS.<br>Default: false | false |
+| insecureSkipVerify | boolean | insecureSkipVerify allows skipping client-side verification of the server certificate. If set to true, the caCert field is ignored.<br>Default: false | false |
+| userCert | object | userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS). | false |
+
+### FlowCollector.spec.kafka.tls.caCert
+[↩ Parent](#flowcollectorspeckafkatls-1)
+
+caCert defines the reference of the certificate for the Certificate Authority.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| certFile | string | certFile defines the path to the certificate file name within the config map or secret. | false |
+| certKey | string | certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| name | string | name of the config map or secret containing certificates. | false |
+| namespace | string | namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.<br>Default: | false |
+| type | enum | type for the certificate reference: "configmap" or "secret".<br>Enum: configmap, secret | false |
+
+### FlowCollector.spec.kafka.tls.userCert
+[↩ Parent](#flowcollectorspeckafkatls-1)
+
+userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| certFile | string | certFile defines the path to the certificate file name within the config map or secret. | false |
+| certKey | string | certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| name | string | name of the config map or secret containing certificates. | false |
+| namespace | string | namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.<br>Default: | false |
+| type | enum | type for the certificate reference: "configmap" or "secret".<br>Enum: configmap, secret | false |
+
+### FlowCollector.spec.loki
+[↩ Parent](#flowcollectorspec-1)
+
+loki, the flow store, client settings.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| manual | object |  | false |
+| mode | enum | <br>Enum: manual | false |
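+
+A sketch of the new v1beta2 Loki section, selecting the `manual` mode introduced by this change; the URL is illustrative:
+
+```yaml
+spec:
+  loki:
+    mode: manual
+    manual:
+      url: http://loki:3100/    # default ingester address, illustrative
+```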
+
+### FlowCollector.spec.loki.manual
+[↩ Parent](#flowcollectorspecloki-1)
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| authToken | enum | authToken describes the way to get a token to authenticate to Loki. DISABLED does not send any token with the request. HOST - deprecated (*) - uses the local pod service account to authenticate to Loki. FORWARD forwards the user token for authorization. When using the Loki Operator, this should be set to `FORWARD`.<br>Enum: DISABLED, HOST, FORWARD<br>Default: DISABLED | false |
+| batchSize | integer | batchSize is the maximum batch size (in bytes) of logs to accumulate before sending.<br>Format: int64<br>Default: 102400<br>Minimum: 1 | false |
+| batchWait | string | batchWait is the maximum time to wait before sending a batch.<br>Default: 1s | false |
+| maxBackoff | string | maxBackoff is the maximum backoff time for client connection between retries.<br>Default: 5s | false |
+| maxRetries | integer | maxRetries is the maximum number of retries for client connections.<br>Format: int32<br>Default: 2<br>Minimum: 0 | false |
+| minBackoff | string | minBackoff is the initial backoff time for client connection between retries.<br>Default: 1s | false |
+| querierUrl | string | querierURL specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value is used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway. | false |
+| staticLabels | map[string]string | staticLabels is a map of common labels to set on each flow.<br>Default: map[app:netobserv-flowcollector] | false |
+| statusTls | object | tls client configuration for the Loki status URL. | false |
+| statusUrl | string | statusURL specifies the address of the Loki /ready, /metrics and /config endpoints, in case it is different from the Loki querier URL. If empty, the querierURL value is used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. The statusTLS configuration is used when statusUrl is set. | false |
+| tenantID | string | tenantID is the Loki X-Scope-OrgID that identifies the tenant for each request. When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode.<br>Default: netobserv | false |
+| timeout | string | timeout is the maximum connection / request time limit. A timeout of zero means no timeout.<br>Default: 10s | false |
+| tls | object | tls client configuration for the Loki URL. | false |
+| url | string | url is the address of an existing Loki service to push the flows to. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.<br>Default: http://loki:3100/ | false |
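+
+A sketch of the manual settings for a Loki Operator setup, following the service URLs and tenant given as examples in the field descriptions above; the caCert reference is illustrative:
+
+```yaml
+spec:
+  loki:
+    mode: manual
+    manual:
+      url: https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network
+      statusUrl: https://loki-query-frontend-http.netobserv.svc:3100/
+      tenantID: network
+      authToken: FORWARD
+      tls:
+        enable: true
+        caCert:
+          type: configmap
+          name: loki-gateway-ca-bundle    # illustrative config map name
+          certFile: service-ca.crt
+```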
+
+### FlowCollector.spec.loki.manual.statusTls
+[↩ Parent](#flowcollectorspeclokimanual)
+
+tls client configuration for the Loki status URL.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| caCert | object | caCert defines the reference of the certificate for the Certificate Authority. | false |
+| enable | boolean | enable TLS.<br>Default: false | false |
+| insecureSkipVerify | boolean | insecureSkipVerify allows skipping client-side verification of the server certificate. If set to true, the caCert field is ignored.<br>Default: false | false |
+| userCert | object | userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS). | false |
+
+### FlowCollector.spec.loki.manual.statusTls.caCert
+[↩ Parent](#flowcollectorspeclokimanualstatustls)
+
+caCert defines the reference of the certificate for the Certificate Authority.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| certFile | string | certFile defines the path to the certificate file name within the config map or secret. | false |
+| certKey | string | certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| name | string | name of the config map or secret containing certificates. | false |
+| namespace | string | namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.<br>Default: | false |
+| type | enum | type for the certificate reference: "configmap" or "secret".<br>Enum: configmap, secret | false |
+
+### FlowCollector.spec.loki.manual.statusTls.userCert
+[↩ Parent](#flowcollectorspeclokimanualstatustls)
+
+userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| certFile | string | certFile defines the path to the certificate file name within the config map or secret. | false |
+| certKey | string | certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| name | string | name of the config map or secret containing certificates. | false |
+| namespace | string | namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.<br>Default: | false |
+| type | enum | type for the certificate reference: "configmap" or "secret".<br>Enum: configmap, secret | false |
+
+### FlowCollector.spec.loki.manual.tls
+[↩ Parent](#flowcollectorspeclokimanual)
+
+tls client configuration for the Loki URL.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| caCert | object | caCert defines the reference of the certificate for the Certificate Authority. | false |
+| enable | boolean | enable TLS.<br>Default: false | false |
+| insecureSkipVerify | boolean | insecureSkipVerify allows skipping client-side verification of the server certificate. If set to true, the caCert field is ignored.<br>Default: false | false |
+| userCert | object | userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS). | false |
+
+### FlowCollector.spec.loki.manual.tls.caCert
+[↩ Parent](#flowcollectorspeclokimanualtls)
+
+caCert defines the reference of the certificate for the Certificate Authority.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| certFile | string | certFile defines the path to the certificate file name within the config map or secret. | false |
+| certKey | string | certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| name | string | name of the config map or secret containing certificates. | false |
+| namespace | string | namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.<br>Default: | false |
+| type | enum | type for the certificate reference: "configmap" or "secret".<br>Enum: configmap, secret | false |
+
+### FlowCollector.spec.loki.manual.tls.userCert
+[↩ Parent](#flowcollectorspeclokimanualtls)
+
+userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| certFile | string | certFile defines the path to the certificate file name within the config map or secret. | false |
+| certKey | string | certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| name | string | name of the config map or secret containing certificates. | false |
+| namespace | string | namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.<br>Default: | false |
+| type | enum | type for the certificate reference: "configmap" or "secret".<br>Enum: configmap, secret | false |
+
+### FlowCollector.spec.processor
+[↩ Parent](#flowcollectorspec-1)
+
+processor defines the settings of the component that receives the flows from the agent, enriches them, and forwards them to the Loki persistence layer.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| conversationEndTimeout | string | conversationEndTimeout is the duration of time to wait from the last flow log to end a conversation.<br>Default: 10s | false |
+| conversationHeartbeatInterval | string | conversationHeartbeatInterval is the duration of time to wait between heartbeat reports of a conversation.<br>Default: 30s | false |
+| conversationTerminatingTimeout | string | conversationTerminatingTimeout is the duration of time to wait from the detected FIN flag to end a connection.<br>Default: 5s | false |
+| debug | object | Debug allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively at debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do so at their own risk. | false |
+| dropUnusedFields | boolean | dropUnusedFields allows, when set to true, to drop fields that are known to be unused by OVS, in order to save storage space.<br>Default: true | false |
+| enableKubeProbes | boolean | enableKubeProbes is a flag to enable or disable Kubernetes liveness and readiness probes.<br>Default: true | false |
+| healthPort | integer | healthPort is a collector HTTP port in the Pod that exposes the health check API.<br>Format: int32<br>Default: 8080<br>Minimum: 1<br>Maximum: 65535 | false |
+| imagePullPolicy | enum | imagePullPolicy is the Kubernetes pull policy for the image defined above.<br>Enum: IfNotPresent, Always, Never<br>Default: IfNotPresent | false |
+| kafkaConsumerAutoscaler | object | kafkaConsumerAutoscaler is the spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled. | false |
+| kafkaConsumerBatchSize | integer | kafkaConsumerBatchSize indicates to the broker the maximum batch size, in bytes, that the consumer accepts. Ignored when not using Kafka. Default: 10MB.<br>Default: 10485760 | false |
+| kafkaConsumerQueueCapacity | integer | kafkaConsumerQueueCapacity defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka.<br>Default: 1000 | false |
+| kafkaConsumerReplicas | integer | kafkaConsumerReplicas defines the number of replicas (pods) to start for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled.<br>Format: int32<br>Default: 3<br>Minimum: 0 | false |
+| logLevel | enum | logLevel of the collector runtime.<br>Enum: trace, debug, info, warn, error, fatal, panic<br>Default: info | false |
+| logTypes | enum | logTypes defines the desired record types to generate. Possible values are "FLOWS" (default) to export flow logs, "CONVERSATIONS" to generate newConnection, heartbeat and endConnection events, "ENDED_CONVERSATIONS" to generate only endConnection events, or "ALL" to generate both flow logs and conversation events.<br>Enum: FLOWS, CONVERSATIONS, ENDED_CONVERSATIONS, ALL<br>Default: FLOWS | false |
+| metrics | object | Metrics define the processor configuration regarding metrics. | false |
+| port | integer | port of the flow collector (host port). By convention, some values are not authorized: the port must not be below 1024 and must not equal 4789, 6081, 500 or 4500.<br>Format: int32<br>Default: 2055<br>Minimum: 1025<br>Maximum: 65535 | false |
+| profilePort | integer | profilePort allows setting up a Go pprof profiler listening to this port.<br>Format: int32<br>Minimum: 0<br>Maximum: 65535 | false |
+| resources | object | resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/<br>Default: map[limits:map[memory:800Mi] requests:map[cpu:100m memory:100Mi]] | false |
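+
+A sketch of the processor section with conversation tracking enabled; timings mirror the defaults documented above:
+
+```yaml
+spec:
+  processor:
+    logTypes: CONVERSATIONS
+    conversationHeartbeatInterval: 30s
+    conversationEndTimeout: 10s
+    conversationTerminatingTimeout: 5s
+    logLevel: info
+    port: 2055
+```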
+
+### FlowCollector.spec.processor.debug
+[↩ Parent](#flowcollectorspecprocessor-1)
+
+Debug allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively at debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do so at their own risk.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| env | map[string]string | env allows passing custom environment variables to the NetObserv Agent. Useful for passing some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug and support scenarios. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler
+[↩ Parent](#flowcollectorspecprocessor-1)
+
+kafkaConsumerAutoscaler is the spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| maxReplicas | integer | maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than minReplicas.<br>Format: int32<br>Default: 3 | false |
+| metrics | []object | metrics used by the pod autoscaler. | false |
+| minReplicas | integer | minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.<br>Format: int32 | false |
+| status | enum | status describes the desired status regarding deploying a horizontal pod autoscaler. DISABLED does not deploy a horizontal pod autoscaler. ENABLED deploys a horizontal pod autoscaler.<br>Enum: DISABLED, ENABLED<br>Default: DISABLED | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index]
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscaler-1)
+
+MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | string | type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: the "ContainerResource" type is available only when the HPAContainerMetrics feature gate is enabled. | true |
+| containerResource | object | containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. | false |
+| external | object | external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of the cluster (for example, the length of a queue in a cloud messaging service, or QPS from a load balancer running outside of the cluster). | false |
+| object | object | object refers to a metric describing a single Kubernetes object (for example, hits-per-second on an Ingress object). | false |
+| pods | object | pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values are averaged together before being compared to the target value. | false |
+| resource | object | resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].containerResource
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1)
+
+containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| container | string | container is the name of the container in the pods of the scaling target. | true |
+| name | string | name is the name of the resource in question. | true |
+| target | object | target specifies the target value for the given metric. | true |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].containerResource.target
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexcontainerresource-1)
+
+target specifies the target value for the given metric.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | string | type represents whether the metric type is Utilization, Value, or AverageValue. | true |
+| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for the Resource metric source type.<br>Format: int32 | false |
+| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity). | false |
+| value | int or string | value is the target value of the metric (as a quantity). | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1)
+
+external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of the cluster (for example, the length of a queue in a cloud messaging service, or QPS from a load balancer running outside of the cluster).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| metric | object | metric identifies the target metric by name and selector. | true |
+| target | object | target specifies the target value for the given metric. | true |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.metric
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternal-1)
+
+metric identifies the target metric by name and selector.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| name | string | name is the name of the given metric. | true |
+| selector | object | selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName is used to gather metrics. | false |
+
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.metric.selector
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternalmetric-1)
+
+selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName is used to gather metrics.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
+| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.metric.selector.matchExpressions[index]
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternalmetricselector-1)
+
+A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| key | string | key is the label key that the selector applies to. | true |
+| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
+| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
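+As a quick illustration, a requirement of this shape might be written as in the sketch below; the label key and values are hypothetical:
+
+```yaml
+selector:
+  matchExpressions:
+    - key: app.kubernetes.io/component   # hypothetical label key
+      operator: In
+      values: ["processor", "ingester"]  # hypothetical label values
+```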
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.target
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternal-1)
+
+target specifies the target value for the given metric
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
+| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type. Format: int32 | false |
+| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
+| value | int or string | value is the target value of the metric (as a quantity). | false |
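+For illustration, an `external` metric entry under `spec.processor.kafkaConsumerAutoscaler.metrics` could be sketched as below; the metric name, selector label, and target value are hypothetical and depend on what your external metrics provider exposes:
+
+```yaml
+- type: External
+  external:
+    metric:
+      name: kafka_consumergroup_lag      # hypothetical external metric
+      selector:
+        matchLabels:
+          topic: network-flows           # hypothetical label
+    target:
+      type: AverageValue
+      averageValue: "100"                # scale when average lag exceeds 100
+```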
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1)
+
+object refers to a metric describing a single Kubernetes object (for example, hits-per-second on an Ingress object).
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| describedObject | object | describedObject specifies the described object, such as kind, name, and apiVersion | true |
+| metric | object | metric identifies the target metric by name and selector | true |
+| target | object | target specifies the target value for the given metric | true |
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.describedObject
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject-1)
+
+describedObject specifies the described object, such as kind, name, and apiVersion
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| kind | string | Kind of the referent; more info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | true |
+| name | string | Name of the referent; more info: http://kubernetes.io/docs/user-guide/identifiers#names | true |
+| apiVersion | string | API version of the referent | false |
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.metric
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject-1)
+
+metric identifies the target metric by name and selector
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| name | string | name is the name of the given metric | true |
+| selector | object | selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. | false |
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.metric.selector
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobjectmetric-1)
+
+selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
+| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.metric.selector.matchExpressions[index]
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobjectmetricselector-1)
+
+A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| key | string | key is the label key that the selector applies to. | true |
+| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
+| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.target
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject-1)
+
+target specifies the target value for the given metric
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
+| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type. Format: int32 | false |
+| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
+| value | int or string | value is the target value of the metric (as a quantity). | false |
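+A hedged sketch of an `object` metric entry; the referenced Ingress and metric name are hypothetical:
+
+```yaml
+- type: Object
+  object:
+    describedObject:
+      apiVersion: networking.k8s.io/v1
+      kind: Ingress
+      name: main-ingress                 # hypothetical object name
+    metric:
+      name: requests_per_second          # hypothetical metric
+    target:
+      type: Value
+      value: "2k"
+```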
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1)
+
+pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| metric | object | metric identifies the target metric by name and selector | true |
+| target | object | target specifies the target value for the given metric | true |
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.metric
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpods-1)
+
+metric identifies the target metric by name and selector
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| name | string | name is the name of the given metric | true |
+| selector | object | selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. | false |
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.metric.selector
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpodsmetric-1)
+
+selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
+| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.metric.selector.matchExpressions[index]
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpodsmetricselector-1)
+
+A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| key | string | key is the label key that the selector applies to. | true |
+| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
+| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.target
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpods-1)
+
+target specifies the target value for the given metric
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
+| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type. Format: int32 | false |
+| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
+| value | int or string | value is the target value of the metric (as a quantity). | false |
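+A sketch of a `pods` metric entry, assuming a hypothetical per-pod metric is exposed to the metrics server:
+
+```yaml
+- type: Pods
+  pods:
+    metric:
+      name: flows_processed_per_second   # hypothetical per-pod metric
+    target:
+      type: AverageValue
+      averageValue: "1000"
+```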
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].resource
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1)
+
+resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| name | string | name is the name of the resource in question. | true |
+| target | object | target specifies the target value for the given metric | true |
+### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].resource.target
+[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexresource-1)
+
+target specifies the target value for the given metric
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| type | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
+| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type. Format: int32 | false |
+| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
+| value | int or string | value is the target value of the metric (as a quantity). | false |
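+A typical `resource` metric entry targets average CPU utilization; the 80% threshold below is illustrative:
+
+```yaml
+- type: Resource
+  resource:
+    name: cpu
+    target:
+      type: Utilization
+      averageUtilization: 80
+```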
+### FlowCollector.spec.processor.metrics
+[↩ Parent](#flowcollectorspecprocessor-1)
+
+Metrics define the processor configuration regarding metrics
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| disableAlerts | []enum | disableAlerts is a list of alerts that should be disabled. Possible values are: `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period; `NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors. | false |
+| ignoreTags | []string | ignoreTags is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: egress, ingress, flows, bytes, packets, namespaces, nodes, workloads. Default: [egress packets] | false |
+| server | object | metricsServer endpoint configuration for Prometheus scraper | false |
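+For example, a `metrics` block can combine the tags and alert names documented above (values taken from the listed tags and enums):
+
+```yaml
+processor:
+  metrics:
+    ignoreTags: ["egress", "packets", "nodes"]
+    disableAlerts: ["NetObservNoFlows"]
+```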
+### FlowCollector.spec.processor.metrics.server
+[↩ Parent](#flowcollectorspecprocessormetrics-1)
+
+metricsServer endpoint configuration for Prometheus scraper
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| port | integer | The Prometheus HTTP port. Format: int32. Default: 9102. Minimum: 1. Maximum: 65535 | false |
+| tls | object | TLS configuration. | false |
+### FlowCollector.spec.processor.metrics.server.tls
+[↩ Parent](#flowcollectorspecprocessormetricsserver-1)
+
+TLS configuration.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| provided | object | TLS configuration when `type` is set to "PROVIDED". | false |
+| type | enum | Select the type of TLS configuration: "DISABLED" (default) to not configure TLS for the endpoint, "PROVIDED" to manually provide a certificate file and a key file, and "AUTO" to use an OpenShift auto-generated certificate using annotations. Enum: DISABLED, PROVIDED, AUTO. Default: DISABLED | false |
+### FlowCollector.spec.processor.metrics.server.tls.provided
+[↩ Parent](#flowcollectorspecprocessormetricsservertls-1)
+
+TLS configuration for manually provided certificates.
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| certFile | string | certFile defines the path to the certificate file name within the config map or secret | false |
+| certKey | string | certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
+| name | string | name of the config map or secret containing certificates | false |
+| namespace | string | namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. Default: "" | false |
+| type | enum | type for the certificate reference: "configmap" or "secret". Enum: configmap, secret | false |
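+A sketch of a manually provided TLS configuration for the metrics server; the secret name is hypothetical, and `tls.crt`/`tls.key` are assumed key names within that secret:
+
+```yaml
+processor:
+  metrics:
+    server:
+      port: 9102
+      tls:
+        type: PROVIDED
+        provided:
+          type: secret
+          name: flp-metrics-cert         # hypothetical secret name
+          certFile: tls.crt
+          certKey: tls.key
+```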
+### FlowCollector.spec.processor.resources
+[↩ Parent](#flowcollectorspecprocessor-1)
+
+resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
+| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
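+For example (the sizing values are illustrative only):
+
+```yaml
+processor:
+  resources:
+    requests:
+      cpu: 100m
+      memory: 100Mi
+    limits:
+      memory: 800Mi
+```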
+### FlowCollector.status
+[↩ Parent](#flowcollector-1)
+
+FlowCollectorStatus defines the observed state of FlowCollector
+
+| Name | Type | Description | Required |
+| ---- | ---- | ----------- | -------- |
+| conditions | []object | conditions represent the latest available observations of an object's state | true |
+| namespace | string | namespace where console plugin and flowlogs-pipeline have been deployed. | false |
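+A hedged sketch of what a populated status could look like; the condition type, reason, and timestamp are illustrative rather than guaranteed values:
+
+```yaml
+status:
+  namespace: netobserv
+  conditions:
+    - type: Ready                        # illustrative condition type
+      status: "True"
+      reason: Ready
+      lastTransitionTime: "2023-06-16T20:30:30Z"
+```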
+### FlowCollector.status.conditions[index]
+[↩ Parent](#flowcollectorstatus-1)
+
+Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // other fields }
diff --git a/main.go b/main.go
index 754f92df3..bbf68bb32 100644
--- a/main.go
+++ b/main.go
@@ -46,6 +46,7 @@ import (
 	flowsv1alpha1 "github.com/netobserv/network-observability-operator/api/v1alpha1"
 	flowsv1beta1 "github.com/netobserv/network-observability-operator/api/v1beta1"
+	flowsv1beta2 "github.com/netobserv/network-observability-operator/api/v1beta2"
 	"github.com/netobserv/network-observability-operator/controllers"
 	"github.com/netobserv/network-observability-operator/controllers/constants"
 	//+kubebuilder:scaffold:imports
@@ -64,6 +65,7 @@ func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
 	utilruntime.Must(flowsv1alpha1.AddToScheme(scheme))
 	utilruntime.Must(flowsv1beta1.AddToScheme(scheme))
+	utilruntime.Must(flowsv1beta2.AddToScheme(scheme))
 	utilruntime.Must(corev1.AddToScheme(scheme))
 	utilruntime.Must(ascv2.AddToScheme(scheme))
 	utilruntime.Must(osv1alpha1.AddToScheme(scheme))
@@ -134,7 +136,7 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "FlowCollector")
 		os.Exit(1)
 	}
-	if err = (&flowsv1beta1.FlowCollector{}).SetupWebhookWithManager(mgr); err != nil {
+	if err = (&flowsv1beta2.FlowCollector{}).SetupWebhookWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create v1beta1 webhook", "webhook", "FlowCollector")
 		os.Exit(1)
 	}
diff --git a/pkg/helper/comparators.go b/pkg/helper/comparators.go
index d958ace0b..6eb28c11f 100644
--- a/pkg/helper/comparators.go
+++ b/pkg/helper/comparators.go
@@ -10,7 +10,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
 
-	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1"
+	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
 	"github.com/netobserv/network-observability-operator/controllers/constants"
 )
diff --git a/pkg/helper/flowcollector.go b/pkg/helper/flowcollector.go
index b34c402ab..124d59086 100644
--- a/pkg/helper/flowcollector.go
+++ b/pkg/helper/flowcollector.go
@@ -1,7 +1,7 @@
 package helper
 
 import (
-	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1"
+	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
 	"github.com/netobserv/network-observability-operator/controllers/constants"
 )
 
@@ -39,22 +39,22 @@ func HPAEnabled(spec *flowslatest.FlowCollectorHPA) bool {
 }
 
 func LokiNoAuthToken(spec *flowslatest.FlowCollectorLoki) bool {
-	return spec.AuthToken == flowslatest.LokiAuthDisabled
+	return spec.Manual.AuthToken == flowslatest.LokiAuthDisabled
 }
 
 func LokiUseHostToken(spec *flowslatest.FlowCollectorLoki) bool {
-	return spec.AuthToken == flowslatest.LokiAuthUseHostToken
+	return spec.Manual.AuthToken == flowslatest.LokiAuthUseHostToken
 }
 
 func LokiForwardUserToken(spec *flowslatest.FlowCollectorLoki) bool {
-	return spec.AuthToken == flowslatest.LokiAuthForwardUserToken
+	return spec.Manual.AuthToken == flowslatest.LokiAuthForwardUserToken
 }
 
 func GetLokiStatusTLS(spec *flowslatest.FlowCollectorLoki) flowslatest.ClientTLS {
-	if spec.StatusURL != "" {
-		return spec.StatusTLS
+	if spec.Manual.StatusURL != "" {
+		return spec.Manual.StatusTLS
 	}
-	return spec.TLS
+	return spec.Manual.TLS
 }
 
 func GetRecordTypes(processor *flowslatest.FlowCollectorFLP) []string {
diff --git a/pkg/volumes/builder.go b/pkg/volumes/builder.go
index 99fb60068..c2b86cda1 100644
--- a/pkg/volumes/builder.go
+++ b/pkg/volumes/builder.go
@@ -5,7 +5,7 @@ import (
 
 	corev1 "k8s.io/api/core/v1"
 
-	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1"
+	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
 	"github.com/netobserv/network-observability-operator/controllers/constants"
 	"github.com/netobserv/network-observability-operator/pkg/watchers"
 )
diff --git a/pkg/watchers/object_ref.go b/pkg/watchers/object_ref.go
index f1b5f431d..32c022b4e 100644
--- a/pkg/watchers/object_ref.go
+++ b/pkg/watchers/object_ref.go
@@ -1,7 +1,7 @@
 package watchers
 
 import (
-	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1"
+	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
 )
 
 type objectRef struct {
diff --git a/pkg/watchers/watcher.go b/pkg/watchers/watcher.go
index ca3c76568..2df428b93 100644
--- a/pkg/watchers/watcher.go
+++ b/pkg/watchers/watcher.go
@@ -14,7 +14,7 @@ import (
 	rec "sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
-	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1"
+	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
 	"github.com/netobserv/network-observability-operator/controllers/constants"
 	"github.com/netobserv/network-observability-operator/pkg/helper"
 )
diff --git a/pkg/watchers/watcher_test.go b/pkg/watchers/watcher_test.go
index a95128fde..23a956edb 100644
--- a/pkg/watchers/watcher_test.go
+++ b/pkg/watchers/watcher_test.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"testing"
 
-	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta1"
+	flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
 	"github.com/netobserv/network-observability-operator/pkg/helper"
 	"github.com/netobserv/network-observability-operator/pkg/test"
 	"github.com/stretchr/testify/assert"