diff --git a/api/v1alpha1/flowcollector_webhook.go b/api/v1alpha1/flowcollector_webhook.go
index 3ff8d6c2e..24913ab1e 100644
--- a/api/v1alpha1/flowcollector_webhook.go
+++ b/api/v1alpha1/flowcollector_webhook.go
@@ -22,6 +22,7 @@ import (
"github.com/netobserv/network-observability-operator/api/v1beta2"
utilconversion "github.com/netobserv/network-observability-operator/pkg/conversion"
+ "github.com/netobserv/network-observability-operator/pkg/helper"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiconversion "k8s.io/apimachinery/pkg/conversion"
"sigs.k8s.io/controller-runtime/pkg/conversion"
@@ -41,16 +42,6 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error {
// Manually restore data.
restored := &v1beta2.FlowCollector{}
if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- // fallback on current loki config as Manual mode if metadata are not available
- dst.Spec.Loki.Mode = v1beta2.LokiModeManual
- dst.Spec.Loki.Manual.IngesterURL = r.Spec.Loki.URL
- dst.Spec.Loki.Manual.QuerierURL = r.Spec.Loki.QuerierURL
- dst.Spec.Loki.Manual.StatusURL = r.Spec.Loki.StatusURL
- dst.Spec.Loki.Manual.TenantID = r.Spec.Loki.TenantID
- dst.Spec.Loki.Manual.AuthToken = r.Spec.Loki.AuthToken
- if err := Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(&r.Spec.Loki.TLS, &dst.Spec.Loki.Manual.TLS, nil); err != nil {
- return fmt.Errorf("copying v1alplha1.Loki.TLS into v1beta2.Loki.Manual.TLS: %w", err)
- }
return err
}
@@ -62,14 +53,14 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error {
// Processor
dst.Spec.Processor.LogTypes = restored.Spec.Processor.LogTypes
- if restored.Spec.Processor.ConversationHeartbeatInterval != nil {
- dst.Spec.Processor.ConversationHeartbeatInterval = restored.Spec.Processor.ConversationHeartbeatInterval
+ if restored.Spec.Processor.Debug.ConversationHeartbeatInterval != nil {
+ dst.Spec.Processor.Debug.ConversationHeartbeatInterval = restored.Spec.Processor.Debug.ConversationHeartbeatInterval
}
- if restored.Spec.Processor.ConversationEndTimeout != nil {
- dst.Spec.Processor.ConversationEndTimeout = restored.Spec.Processor.ConversationEndTimeout
+ if restored.Spec.Processor.Debug.ConversationEndTimeout != nil {
+ dst.Spec.Processor.Debug.ConversationEndTimeout = restored.Spec.Processor.Debug.ConversationEndTimeout
}
- if restored.Spec.Processor.ConversationTerminatingTimeout != nil {
- dst.Spec.Processor.ConversationTerminatingTimeout = restored.Spec.Processor.ConversationTerminatingTimeout
+ if restored.Spec.Processor.Debug.ConversationTerminatingTimeout != nil {
+ dst.Spec.Processor.Debug.ConversationTerminatingTimeout = restored.Spec.Processor.Debug.ConversationTerminatingTimeout
}
if restored.Spec.Processor.Metrics.DisableAlerts != nil {
dst.Spec.Processor.Metrics.DisableAlerts = restored.Spec.Processor.Metrics.DisableAlerts
@@ -157,6 +148,16 @@ func Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta2.FLPMetrics, o
// we have new defined fields in v1beta2 not in v1alpha1
// nolint:golint,stylecheck,revive
func Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s apiconversion.Scope) error {
+ out.URL = helper.LokiIngesterURL(in)
+ out.QuerierURL = helper.LokiQuerierURL(in)
+ out.StatusURL = helper.LokiStatusURL(in)
+ out.TenantID = helper.LokiTenantID(in)
+ switch in.Mode {
+ case v1beta2.LokiModeManual:
+ out.AuthToken = in.Manual.AuthToken
+ case v1beta2.LokiModeLokiStack:
+ out.AuthToken = v1beta2.LokiAuthForwardUserToken
+ }
return autoConvert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in, out, s)
}
@@ -168,21 +169,21 @@ func Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCol
}
// This function need to be manually created because conversion-gen not able to create it intentionally because
-// we have new defined fields in v1beta1 not in v1alpha1
+// we have new defined fields in v1beta2 not in v1alpha1
// nolint:golint,stylecheck,revive
func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s apiconversion.Scope) error {
return autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in, out, s)
}
// This function need to be manually created because conversion-gen not able to create it intentionally because
-// we have new defined fields in v1beta1 not in v1alpha1
+// we have new defined fields in v1beta2 not in v1alpha1
// nolint:golint,stylecheck,revive
func Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s apiconversion.Scope) error {
return autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in, out, s)
}
// This function need to be manually created because conversion-gen not able to create it intentionally because
-// we have new defined fields in v1beta1 not in v1alpha1
+// we have new defined fields in v1beta2 not in v1alpha1
// nolint:golint,stylecheck,revive
func Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s apiconversion.Scope) error {
return autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in, out, s)
@@ -194,3 +195,49 @@ func Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2
func Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s apiconversion.Scope) error {
return autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in, out, s)
}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1alpha1.
+// nolint:golint,stylecheck,revive
+func Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s apiconversion.Scope) error {
+ return autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in, out, s)
+}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1alpha1.
+// nolint:golint,stylecheck,revive
+func Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s apiconversion.Scope) error {
+ return autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in, out, s)
+}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1alpha1.
+// nolint:golint,stylecheck,revive
+func Convert_v1alpha1_DebugConfig_To_v1beta2_DebugAgentConfig(in *DebugConfig, out *v1beta2.DebugAgentConfig, s apiconversion.Scope) error {
+ out.Env = in.Env
+ return nil
+}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1alpha1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_DebugAgentConfig_To_v1alpha1_DebugConfig(in *v1beta2.DebugAgentConfig, out *DebugConfig, s apiconversion.Scope) error {
+ out.Env = in.Env
+ return nil
+}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1alpha1.
+// nolint:golint,stylecheck,revive
+func Convert_v1alpha1_DebugConfig_To_v1beta2_DebugProcessorConfig(in *DebugConfig, out *v1beta2.DebugProcessorConfig, s apiconversion.Scope) error {
+ out.Env = in.Env
+ return nil
+}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1alpha1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_DebugProcessorConfig_To_v1alpha1_DebugConfig(in *v1beta2.DebugProcessorConfig, out *DebugConfig, s apiconversion.Scope) error {
+ out.Env = in.Env
+ return nil
+}
diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go
index 0462eb110..d4521c13f 100644
--- a/api/v1alpha1/zz_generated.conversion.go
+++ b/api/v1alpha1/zz_generated.conversion.go
@@ -78,16 +78,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
- if err := s.AddGeneratedConversionFunc((*DebugConfig)(nil), (*v1beta2.DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(a.(*DebugConfig), b.(*v1beta2.DebugConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta2.DebugConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(a.(*v1beta2.DebugConfig), b.(*DebugConfig), scope)
- }); err != nil {
- return err
- }
if err := s.AddGeneratedConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope)
}); err != nil {
@@ -113,11 +103,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
- if err := s.AddGeneratedConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope)
- }); err != nil {
- return err
- }
if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope)
}); err != nil {
@@ -128,11 +113,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
- if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope)
- }); err != nil {
- return err
- }
if err := s.AddGeneratedConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope)
}); err != nil {
@@ -228,11 +208,41 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
+ if err := s.AddConversionFunc((*DebugConfig)(nil), (*v1beta2.DebugAgentConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_DebugConfig_To_v1beta2_DebugAgentConfig(a.(*DebugConfig), b.(*v1beta2.DebugAgentConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*DebugConfig)(nil), (*v1beta2.DebugProcessorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_DebugConfig_To_v1beta2_DebugProcessorConfig(a.(*DebugConfig), b.(*v1beta2.DebugProcessorConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope)
+ }); err != nil {
+ return err
+ }
if err := s.AddConversionFunc((*FlowCollectorLoki)(nil), (*v1beta2.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta2.FlowCollectorLoki), scope)
}); err != nil {
return err
}
+ if err := s.AddConversionFunc((*v1beta2.DebugAgentConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_DebugAgentConfig_To_v1alpha1_DebugConfig(a.(*v1beta2.DebugAgentConfig), b.(*DebugConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.DebugProcessorConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_DebugProcessorConfig_To_v1alpha1_DebugConfig(a.(*v1beta2.DebugProcessorConfig), b.(*DebugConfig), scope)
+ }); err != nil {
+ return err
+ }
if err := s.AddConversionFunc((*v1beta2.FLPMetrics)(nil), (*FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(a.(*v1beta2.FLPMetrics), b.(*FLPMetrics), scope)
}); err != nil {
@@ -379,26 +389,6 @@ func Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig
return autoConvert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in, out, s)
}
-func autoConvert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(in *DebugConfig, out *v1beta2.DebugConfig, s conversion.Scope) error {
- out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env))
- return nil
-}
-
-// Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig is an autogenerated conversion function.
-func Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(in *DebugConfig, out *v1beta2.DebugConfig, s conversion.Scope) error {
- return autoConvert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(in, out, s)
-}
-
-func autoConvert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(in *v1beta2.DebugConfig, out *DebugConfig, s conversion.Scope) error {
- out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env))
- return nil
-}
-
-// Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig is an autogenerated conversion function.
-func Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(in *v1beta2.DebugConfig, out *DebugConfig, s conversion.Scope) error {
- return autoConvert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(in, out, s)
-}
-
func autoConvert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error {
if err := Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(&in.Server, &out.Server, s); err != nil {
return err
@@ -486,13 +476,11 @@ func Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1bet
}
func autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error {
- if err := v1.Convert_bool_To_Pointer_bool(&in.Register, &out.Register, s); err != nil {
- return err
- }
+ // WARNING: in.Register requires manual conversion: does not exist in peer-type
if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
- out.Port = in.Port
+ // WARNING: in.Port requires manual conversion: does not exist in peer-type
out.ImagePullPolicy = in.ImagePullPolicy
out.Resources = in.Resources
out.LogLevel = in.LogLevel
@@ -506,20 +494,11 @@ func autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorCon
return nil
}
-// Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin is an autogenerated conversion function.
-func Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error {
- return autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in, out, s)
-}
-
func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error {
// WARNING: in.Enable requires manual conversion: does not exist in peer-type
- if err := v1.Convert_Pointer_bool_To_bool(&in.Register, &out.Register, s); err != nil {
- return err
- }
if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
- out.Port = in.Port
out.ImagePullPolicy = in.ImagePullPolicy
out.Resources = in.Resources
out.LogLevel = in.LogLevel
@@ -530,6 +509,7 @@ func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorCon
return err
}
out.QuickFilters = *(*[]QuickFilter)(unsafe.Pointer(&in.QuickFilters))
+ // WARNING: in.Debug requires manual conversion: does not exist in peer-type
return nil
}
@@ -544,7 +524,7 @@ func autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *Flo
out.LogLevel = in.LogLevel
out.Privileged = in.Privileged
out.KafkaBatchSize = in.KafkaBatchSize
- if err := Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil {
+ if err := Convert_v1alpha1_DebugConfig_To_v1beta2_DebugAgentConfig(&in.Debug, &out.Debug, s); err != nil {
return err
}
return nil
@@ -566,7 +546,7 @@ func autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1b
out.LogLevel = in.LogLevel
out.Privileged = in.Privileged
out.KafkaBatchSize = in.KafkaBatchSize
- if err := Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil {
+ if err := Convert_v1beta2_DebugAgentConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil {
return err
}
// WARNING: in.Features requires manual conversion: does not exist in peer-type
@@ -596,21 +576,17 @@ func autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter
}
func autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error {
- out.Port = in.Port
- out.HealthPort = in.HealthPort
- out.ProfilePort = in.ProfilePort
+ // WARNING: in.Port requires manual conversion: does not exist in peer-type
+ // WARNING: in.HealthPort requires manual conversion: does not exist in peer-type
+ // WARNING: in.ProfilePort requires manual conversion: does not exist in peer-type
out.ImagePullPolicy = in.ImagePullPolicy
if err := Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil {
return err
}
out.LogLevel = in.LogLevel
out.Resources = in.Resources
- if err := v1.Convert_bool_To_Pointer_bool(&in.EnableKubeProbes, &out.EnableKubeProbes, s); err != nil {
- return err
- }
- if err := v1.Convert_bool_To_Pointer_bool(&in.DropUnusedFields, &out.DropUnusedFields, s); err != nil {
- return err
- }
+ // WARNING: in.EnableKubeProbes requires manual conversion: does not exist in peer-type
+ // WARNING: in.DropUnusedFields requires manual conversion: does not exist in peer-type
if err := v1.Convert_int32_To_Pointer_int32(&in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas, s); err != nil {
return err
}
@@ -619,33 +595,19 @@ func autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowC
}
out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity
out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize
- if err := Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil {
+ if err := Convert_v1alpha1_DebugConfig_To_v1beta2_DebugProcessorConfig(&in.Debug, &out.Debug, s); err != nil {
return err
}
return nil
}
-// Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP is an autogenerated conversion function.
-func Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error {
- return autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in, out, s)
-}
-
func autoConvert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s conversion.Scope) error {
- out.Port = in.Port
- out.HealthPort = in.HealthPort
- out.ProfilePort = in.ProfilePort
out.ImagePullPolicy = in.ImagePullPolicy
if err := Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil {
return err
}
out.LogLevel = in.LogLevel
out.Resources = in.Resources
- if err := v1.Convert_Pointer_bool_To_bool(&in.EnableKubeProbes, &out.EnableKubeProbes, s); err != nil {
- return err
- }
- if err := v1.Convert_Pointer_bool_To_bool(&in.DropUnusedFields, &out.DropUnusedFields, s); err != nil {
- return err
- }
if err := v1.Convert_Pointer_int32_To_int32(&in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas, s); err != nil {
return err
}
@@ -655,11 +617,8 @@ func autoConvert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1bet
out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity
out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize
// WARNING: in.LogTypes requires manual conversion: does not exist in peer-type
- // WARNING: in.ConversationHeartbeatInterval requires manual conversion: does not exist in peer-type
- // WARNING: in.ConversationEndTimeout requires manual conversion: does not exist in peer-type
- // WARNING: in.ConversationTerminatingTimeout requires manual conversion: does not exist in peer-type
// WARNING: in.ClusterName requires manual conversion: does not exist in peer-type
- if err := Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil {
+ if err := Convert_v1beta2_DebugProcessorConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil {
return err
}
return nil
@@ -806,23 +765,15 @@ func autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *Flo
// WARNING: in.StatusURL requires manual conversion: does not exist in peer-type
// WARNING: in.TenantID requires manual conversion: does not exist in peer-type
// WARNING: in.AuthToken requires manual conversion: does not exist in peer-type
- if err := v1.Convert_v1_Duration_To_Pointer_v1_Duration(&in.BatchWait, &out.BatchWait, s); err != nil {
- return err
- }
- out.BatchSize = in.BatchSize
+ // WARNING: in.BatchWait requires manual conversion: does not exist in peer-type
+ // WARNING: in.BatchSize requires manual conversion: does not exist in peer-type
if err := v1.Convert_v1_Duration_To_Pointer_v1_Duration(&in.Timeout, &out.Timeout, s); err != nil {
return err
}
- if err := v1.Convert_v1_Duration_To_Pointer_v1_Duration(&in.MinBackoff, &out.MinBackoff, s); err != nil {
- return err
- }
- if err := v1.Convert_v1_Duration_To_Pointer_v1_Duration(&in.MaxBackoff, &out.MaxBackoff, s); err != nil {
- return err
- }
- if err := v1.Convert_int32_To_Pointer_int32(&in.MaxRetries, &out.MaxRetries, s); err != nil {
- return err
- }
- out.StaticLabels = *(*map[string]string)(unsafe.Pointer(&in.StaticLabels))
+ // WARNING: in.MinBackoff requires manual conversion: does not exist in peer-type
+ // WARNING: in.MaxBackoff requires manual conversion: does not exist in peer-type
+ // WARNING: in.MaxRetries requires manual conversion: does not exist in peer-type
+ // WARNING: in.StaticLabels requires manual conversion: does not exist in peer-type
// WARNING: in.TLS requires manual conversion: does not exist in peer-type
return nil
}
@@ -834,23 +785,9 @@ func autoConvert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1b
// WARNING: in.Monolith requires manual conversion: does not exist in peer-type
// WARNING: in.LokiStack requires manual conversion: does not exist in peer-type
// WARNING: in.Enable requires manual conversion: does not exist in peer-type
- if err := v1.Convert_Pointer_v1_Duration_To_v1_Duration(&in.BatchWait, &out.BatchWait, s); err != nil {
- return err
- }
- out.BatchSize = in.BatchSize
if err := v1.Convert_Pointer_v1_Duration_To_v1_Duration(&in.Timeout, &out.Timeout, s); err != nil {
return err
}
- if err := v1.Convert_Pointer_v1_Duration_To_v1_Duration(&in.MinBackoff, &out.MinBackoff, s); err != nil {
- return err
- }
- if err := v1.Convert_Pointer_v1_Duration_To_v1_Duration(&in.MaxBackoff, &out.MaxBackoff, s); err != nil {
- return err
- }
- if err := v1.Convert_Pointer_int32_To_int32(&in.MaxRetries, &out.MaxRetries, s); err != nil {
- return err
- }
- out.StaticLabels = *(*map[string]string)(unsafe.Pointer(&in.StaticLabels))
return nil
}
diff --git a/api/v1beta1/flowcollector_webhook.go b/api/v1beta1/flowcollector_webhook.go
index e40c6a283..f45e8f88f 100644
--- a/api/v1beta1/flowcollector_webhook.go
+++ b/api/v1beta1/flowcollector_webhook.go
@@ -143,3 +143,67 @@ func Convert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in *v1beta2.
func Convert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s apiconversion.Scope) error {
return autoConvert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in, out, s)
}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1beta1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s apiconversion.Scope) error {
+ out.Debug.Register = in.Register
+ out.Debug.Port = in.Port
+ return autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in, out, s)
+}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1beta1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s apiconversion.Scope) error {
+ out.Register = in.Debug.Register
+ out.Port = in.Debug.Port
+ return autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in, out, s)
+}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1beta1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s apiconversion.Scope) error {
+ out.Debug.Port = in.Port
+ out.Debug.HealthPort = in.HealthPort
+ out.Debug.EnableKubeProbes = in.EnableKubeProbes
+ out.Debug.DropUnusedFields = in.DropUnusedFields
+ out.Debug.ConversationHeartbeatInterval = in.ConversationHeartbeatInterval
+ out.Debug.ConversationEndTimeout = in.ConversationEndTimeout
+ out.Debug.ConversationTerminatingTimeout = in.ConversationTerminatingTimeout
+ return autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in, out, s)
+}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1beta1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_DebugConfig_To_v1beta2_DebugAgentConfig(in *DebugConfig, out *v1beta2.DebugAgentConfig, s apiconversion.Scope) error {
+ out.Env = in.Env
+ return nil
+}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1beta1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_DebugAgentConfig_To_v1beta1_DebugConfig(in *v1beta2.DebugAgentConfig, out *DebugConfig, s apiconversion.Scope) error {
+ out.Env = in.Env
+ return nil
+}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1beta1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_DebugConfig_To_v1beta2_DebugProcessorConfig(in *DebugConfig, out *v1beta2.DebugProcessorConfig, s apiconversion.Scope) error {
+ out.Env = in.Env
+ return nil
+}
+
+// This function needs to be created manually: conversion-gen deliberately skips it
+// because v1beta2 defines fields that do not exist in v1beta1.
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_DebugProcessorConfig_To_v1beta1_DebugConfig(in *v1beta2.DebugProcessorConfig, out *DebugConfig, s apiconversion.Scope) error {
+ out.Env = in.Env
+ return nil
+}
diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go
index 581c97d15..b53ddf366 100644
--- a/api/v1beta1/zz_generated.conversion.go
+++ b/api/v1beta1/zz_generated.conversion.go
@@ -78,16 +78,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
- if err := s.AddGeneratedConversionFunc((*DebugConfig)(nil), (*v1beta2.DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(a.(*DebugConfig), b.(*v1beta2.DebugConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta2.DebugConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(a.(*v1beta2.DebugConfig), b.(*DebugConfig), scope)
- }); err != nil {
- return err
- }
if err := s.AddGeneratedConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope)
}); err != nil {
@@ -123,16 +113,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
- if err := s.AddGeneratedConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorConsolePlugin)(nil), (*FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(a.(*v1beta2.FlowCollectorConsolePlugin), b.(*FlowCollectorConsolePlugin), scope)
- }); err != nil {
- return err
- }
if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope)
}); err != nil {
@@ -153,11 +133,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
- if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope)
- }); err != nil {
- return err
- }
if err := s.AddGeneratedConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope)
}); err != nil {
@@ -278,16 +253,51 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
+ if err := s.AddConversionFunc((*DebugConfig)(nil), (*v1beta2.DebugAgentConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_DebugConfig_To_v1beta2_DebugAgentConfig(a.(*DebugConfig), b.(*v1beta2.DebugAgentConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*DebugConfig)(nil), (*v1beta2.DebugProcessorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_DebugConfig_To_v1beta2_DebugProcessorConfig(a.(*DebugConfig), b.(*v1beta2.DebugProcessorConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope)
+ }); err != nil {
+ return err
+ }
if err := s.AddConversionFunc((*FlowCollectorLoki)(nil), (*v1beta2.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta2.FlowCollectorLoki), scope)
}); err != nil {
return err
}
+ if err := s.AddConversionFunc((*v1beta2.DebugAgentConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_DebugAgentConfig_To_v1beta1_DebugConfig(a.(*v1beta2.DebugAgentConfig), b.(*DebugConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.DebugProcessorConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_DebugProcessorConfig_To_v1beta1_DebugConfig(a.(*v1beta2.DebugProcessorConfig), b.(*DebugConfig), scope)
+ }); err != nil {
+ return err
+ }
if err := s.AddConversionFunc((*v1beta2.FLPMetrics)(nil), (*FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(a.(*v1beta2.FLPMetrics), b.(*FLPMetrics), scope)
}); err != nil {
return err
}
+ if err := s.AddConversionFunc((*v1beta2.FlowCollectorConsolePlugin)(nil), (*FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(a.(*v1beta2.FlowCollectorConsolePlugin), b.(*FlowCollectorConsolePlugin), scope)
+ }); err != nil {
+ return err
+ }
if err := s.AddConversionFunc((*v1beta2.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(a.(*v1beta2.FlowCollectorFLP), b.(*FlowCollectorFLP), scope)
}); err != nil {
@@ -405,26 +415,6 @@ func Convert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(
return autoConvert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(in, out, s)
}
-func autoConvert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(in *DebugConfig, out *v1beta2.DebugConfig, s conversion.Scope) error {
- out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env))
- return nil
-}
-
-// Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig is an autogenerated conversion function.
-func Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(in *DebugConfig, out *v1beta2.DebugConfig, s conversion.Scope) error {
- return autoConvert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(in, out, s)
-}
-
-func autoConvert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(in *v1beta2.DebugConfig, out *DebugConfig, s conversion.Scope) error {
- out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env))
- return nil
-}
-
-// Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig is an autogenerated conversion function.
-func Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(in *v1beta2.DebugConfig, out *DebugConfig, s conversion.Scope) error {
- return autoConvert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(in, out, s)
-}
-
func autoConvert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error {
if err := Convert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(&in.Server, &out.Server, s); err != nil {
return err
@@ -540,9 +530,9 @@ func Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *v1beta
func autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error {
out.Enable = (*bool)(unsafe.Pointer(in.Enable))
- out.Register = (*bool)(unsafe.Pointer(in.Register))
+ // WARNING: in.Register requires manual conversion: does not exist in peer-type
out.Replicas = (*int32)(unsafe.Pointer(in.Replicas))
- out.Port = in.Port
+ // WARNING: in.Port requires manual conversion: does not exist in peer-type
out.ImagePullPolicy = in.ImagePullPolicy
out.Resources = in.Resources
out.LogLevel = in.LogLevel
@@ -556,16 +546,9 @@ func autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorCons
return nil
}
-// Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin is an autogenerated conversion function.
-func Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error {
- return autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in, out, s)
-}
-
func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error {
out.Enable = (*bool)(unsafe.Pointer(in.Enable))
- out.Register = (*bool)(unsafe.Pointer(in.Register))
out.Replicas = (*int32)(unsafe.Pointer(in.Replicas))
- out.Port = in.Port
out.ImagePullPolicy = in.ImagePullPolicy
out.Resources = in.Resources
out.LogLevel = in.LogLevel
@@ -576,14 +559,10 @@ func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorCons
return err
}
out.QuickFilters = *(*[]QuickFilter)(unsafe.Pointer(&in.QuickFilters))
+ // WARNING: in.Debug requires manual conversion: does not exist in peer-type
return nil
}
-// Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin is an autogenerated conversion function.
-func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error {
- return autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in, out, s)
-}
-
func autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error {
out.ImagePullPolicy = in.ImagePullPolicy
out.Resources = in.Resources
@@ -595,7 +574,7 @@ func autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *Flow
out.LogLevel = in.LogLevel
out.Privileged = in.Privileged
out.KafkaBatchSize = in.KafkaBatchSize
- if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil {
+ if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugAgentConfig(&in.Debug, &out.Debug, s); err != nil {
return err
}
out.Features = *(*[]v1beta2.AgentFeature)(unsafe.Pointer(&in.Features))
@@ -618,7 +597,7 @@ func autoConvert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *v1be
out.LogLevel = in.LogLevel
out.Privileged = in.Privileged
out.KafkaBatchSize = in.KafkaBatchSize
- if err := Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil {
+ if err := Convert_v1beta2_DebugAgentConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil {
return err
}
out.Features = *(*[]AgentFeature)(unsafe.Pointer(&in.Features))
@@ -663,17 +642,17 @@ func Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in *
}
func autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error {
- out.Port = in.Port
- out.HealthPort = in.HealthPort
- out.ProfilePort = in.ProfilePort
+ // WARNING: in.Port requires manual conversion: does not exist in peer-type
+ // WARNING: in.HealthPort requires manual conversion: does not exist in peer-type
+ // WARNING: in.ProfilePort requires manual conversion: does not exist in peer-type
out.ImagePullPolicy = in.ImagePullPolicy
if err := Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil {
return err
}
out.LogLevel = in.LogLevel
out.Resources = in.Resources
- out.EnableKubeProbes = (*bool)(unsafe.Pointer(in.EnableKubeProbes))
- out.DropUnusedFields = (*bool)(unsafe.Pointer(in.DropUnusedFields))
+ // WARNING: in.EnableKubeProbes requires manual conversion: does not exist in peer-type
+ // WARNING: in.DropUnusedFields requires manual conversion: does not exist in peer-type
out.KafkaConsumerReplicas = (*int32)(unsafe.Pointer(in.KafkaConsumerReplicas))
if err := Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil {
return err
@@ -681,33 +660,23 @@ func autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCo
out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity
out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize
out.LogTypes = (*string)(unsafe.Pointer(in.LogTypes))
- out.ConversationHeartbeatInterval = (*v1.Duration)(unsafe.Pointer(in.ConversationHeartbeatInterval))
- out.ConversationEndTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationEndTimeout))
- out.ConversationTerminatingTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationTerminatingTimeout))
+ // WARNING: in.ConversationHeartbeatInterval requires manual conversion: does not exist in peer-type
+ // WARNING: in.ConversationEndTimeout requires manual conversion: does not exist in peer-type
+ // WARNING: in.ConversationTerminatingTimeout requires manual conversion: does not exist in peer-type
out.ClusterName = in.ClusterName
- if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil {
+ if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugProcessorConfig(&in.Debug, &out.Debug, s); err != nil {
return err
}
return nil
}
-// Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP is an autogenerated conversion function.
-func Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error {
- return autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in, out, s)
-}
-
func autoConvert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s conversion.Scope) error {
- out.Port = in.Port
- out.HealthPort = in.HealthPort
- out.ProfilePort = in.ProfilePort
out.ImagePullPolicy = in.ImagePullPolicy
if err := Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil {
return err
}
out.LogLevel = in.LogLevel
out.Resources = in.Resources
- out.EnableKubeProbes = (*bool)(unsafe.Pointer(in.EnableKubeProbes))
- out.DropUnusedFields = (*bool)(unsafe.Pointer(in.DropUnusedFields))
out.KafkaConsumerReplicas = (*int32)(unsafe.Pointer(in.KafkaConsumerReplicas))
if err := Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil {
return err
@@ -715,11 +684,8 @@ func autoConvert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *v1beta
out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity
out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize
out.LogTypes = (*string)(unsafe.Pointer(in.LogTypes))
- out.ConversationHeartbeatInterval = (*v1.Duration)(unsafe.Pointer(in.ConversationHeartbeatInterval))
- out.ConversationEndTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationEndTimeout))
- out.ConversationTerminatingTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationTerminatingTimeout))
out.ClusterName = in.ClusterName
- if err := Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil {
+ if err := Convert_v1beta2_DebugProcessorConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil {
return err
}
return nil
@@ -892,13 +858,13 @@ func autoConvert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *Flow
// WARNING: in.StatusURL requires manual conversion: does not exist in peer-type
// WARNING: in.TenantID requires manual conversion: does not exist in peer-type
// WARNING: in.AuthToken requires manual conversion: does not exist in peer-type
- out.BatchWait = (*v1.Duration)(unsafe.Pointer(in.BatchWait))
- out.BatchSize = in.BatchSize
+ // WARNING: in.BatchWait requires manual conversion: does not exist in peer-type
+ // WARNING: in.BatchSize requires manual conversion: does not exist in peer-type
out.Timeout = (*v1.Duration)(unsafe.Pointer(in.Timeout))
- out.MinBackoff = (*v1.Duration)(unsafe.Pointer(in.MinBackoff))
- out.MaxBackoff = (*v1.Duration)(unsafe.Pointer(in.MaxBackoff))
- out.MaxRetries = (*int32)(unsafe.Pointer(in.MaxRetries))
- out.StaticLabels = *(*map[string]string)(unsafe.Pointer(&in.StaticLabels))
+ // WARNING: in.MinBackoff requires manual conversion: does not exist in peer-type
+ // WARNING: in.MaxBackoff requires manual conversion: does not exist in peer-type
+ // WARNING: in.MaxRetries requires manual conversion: does not exist in peer-type
+ // WARNING: in.StaticLabels requires manual conversion: does not exist in peer-type
// WARNING: in.TLS requires manual conversion: does not exist in peer-type
// WARNING: in.StatusTLS requires manual conversion: does not exist in peer-type
return nil
@@ -911,13 +877,7 @@ func autoConvert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in *v1be
// WARNING: in.Monolith requires manual conversion: does not exist in peer-type
// WARNING: in.LokiStack requires manual conversion: does not exist in peer-type
out.Enable = (*bool)(unsafe.Pointer(in.Enable))
- out.BatchWait = (*v1.Duration)(unsafe.Pointer(in.BatchWait))
- out.BatchSize = in.BatchSize
out.Timeout = (*v1.Duration)(unsafe.Pointer(in.Timeout))
- out.MinBackoff = (*v1.Duration)(unsafe.Pointer(in.MinBackoff))
- out.MaxBackoff = (*v1.Duration)(unsafe.Pointer(in.MaxBackoff))
- out.MaxRetries = (*int32)(unsafe.Pointer(in.MaxRetries))
- out.StaticLabels = *(*map[string]string)(unsafe.Pointer(&in.StaticLabels))
return nil
}
diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go
index 0bc437a9e..d294ee62d 100644
--- a/api/v1beta2/flowcollector_types.go
+++ b/api/v1beta2/flowcollector_types.go
@@ -229,7 +229,7 @@ type FlowCollectorEBPF struct {
// This section is aimed exclusively for debugging and fine-grained performance optimizations,
// such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk.
// +optional
- Debug DebugConfig `json:"debug,omitempty"`
+ Debug DebugAgentConfig `json:"debug,omitempty"`
// List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
// - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting
@@ -372,26 +372,6 @@ const (
type FlowCollectorFLP struct {
// Important: Run "make generate" to regenerate code after modifying this file
- //+kubebuilder:validation:Minimum=1025
- //+kubebuilder:validation:Maximum=65535
- //+kubebuilder:default:=2055
- // Port of the flow collector (host port).
- // By convention, some values are forbidden. It must be greater than 1024 and different from
- // 4500, 4789 and 6081.
- Port int32 `json:"port,omitempty"`
-
- //+kubebuilder:validation:Minimum=1
- //+kubebuilder:validation:Maximum=65535
- //+kubebuilder:default:=8080
- // `healthPort` is a collector HTTP port in the Pod that exposes the health check API
- HealthPort int32 `json:"healthPort,omitempty"`
-
- //+kubebuilder:validation:Minimum=0
- //+kubebuilder:validation:Maximum=65535
- //+optional
- // `profilePort` allows setting up a Go pprof profiler listening to this port
- ProfilePort int32 `json:"profilePort,omitempty"`
-
//+kubebuilder:validation:Enum=IfNotPresent;Always;Never
//+kubebuilder:default:=IfNotPresent
// `imagePullPolicy` is the Kubernetes pull policy for the image defined above
@@ -411,14 +391,6 @@ type FlowCollectorFLP struct {
// +optional
Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
- //+kubebuilder:default:=true
- // `enableKubeProbes` is a flag to enable or disable Kubernetes liveness and readiness probes
- EnableKubeProbes *bool `json:"enableKubeProbes,omitempty"`
-
- //+kubebuilder:default:=true
- // `dropUnusedFields` allows, when set to `true`, to drop fields that are known to be unused by OVS, to save storage space.
- DropUnusedFields *bool `json:"dropUnusedFields,omitempty"`
-
//+kubebuilder:validation:Minimum=0
//+kubebuilder:default:=3
// `kafkaConsumerReplicas` defines the number of replicas (pods) to start for `flowlogs-pipeline-transformer`, which consumes Kafka messages.
@@ -450,22 +422,6 @@ type FlowCollectorFLP struct {
// +kubebuilder:default:=FLOWS
LogTypes *string `json:"logTypes,omitempty"`
- //+kubebuilder:default:="30s"
- // +optional
- // `conversationHeartbeatInterval` is the time to wait between "tick" events of a conversation
- ConversationHeartbeatInterval *metav1.Duration `json:"conversationHeartbeatInterval,omitempty"`
-
- //+kubebuilder:default:="10s"
- // +optional
- // `conversationEndTimeout` is the time to wait after a network flow is received, to consider the conversation ended.
- // This delay is ignored when a FIN packet is collected for TCP flows (see `conversationTerminatingTimeout` instead).
- ConversationEndTimeout *metav1.Duration `json:"conversationEndTimeout,omitempty"`
-
- //+kubebuilder:default:="5s"
- // +optional
- // `conversationTerminatingTimeout` is the time to wait from detected FIN flag to end a conversation. Only relevant for TCP flows.
- ConversationTerminatingTimeout *metav1.Duration `json:"conversationTerminatingTimeout,omitempty"`
-
//+kubebuilder:default:=""
// +optional
// `clusterName` is the name of the cluster to appear in the flows data. This is useful in a multi-cluster context. When using OpenShift, leave empty to make it automatically determined.
@@ -475,7 +431,7 @@ type FlowCollectorFLP struct {
// This section is aimed exclusively for debugging and fine-grained performance optimizations,
// such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk.
// +optional
- Debug DebugConfig `json:"debug,omitempty"`
+ Debug DebugProcessorConfig `json:"debug,omitempty"`
}
const (
@@ -635,37 +591,10 @@ type FlowCollectorLoki struct {
// Set `enable` to `true` to store flows in Loki. It is required for the OpenShift Console plugin installation.
Enable *bool `json:"enable,omitempty"`
- //+kubebuilder:default:="1s"
- // `batchWait` is the maximum time to wait before sending a batch.
- BatchWait *metav1.Duration `json:"batchWait,omitempty"` // Warning: keep as pointer, else default is ignored
-
- //+kubebuilder:validation:Minimum=1
- //+kubebuilder:default:=102400
- // `batchSize` is the maximum batch size (in bytes) of logs to accumulate before sending.
- BatchSize int64 `json:"batchSize,omitempty"`
-
//+kubebuilder:default:="10s"
// `timeout` is the maximum time connection / request limit.
// A timeout of zero means no timeout.
Timeout *metav1.Duration `json:"timeout,omitempty"` // Warning: keep as pointer, else default is ignored
-
- //+kubebuilder:default="1s"
- // `minBackoff` is the initial backoff time for client connection between retries.
- MinBackoff *metav1.Duration `json:"minBackoff,omitempty"` // Warning: keep as pointer, else default is ignored
-
- //+kubebuilder:default="5s"
- // `maxBackoff` is the maximum backoff time for client connection between retries.
- MaxBackoff *metav1.Duration `json:"maxBackoff,omitempty"` // Warning: keep as pointer, else default is ignored
-
- //+kubebuilder:validation:Minimum=0
- //+kubebuilder:default:=2
- // `maxRetries` is the maximum number of retries for client connections.
- MaxRetries *int32 `json:"maxRetries,omitempty"`
-
- //+kubebuilder:default:={"app":"netobserv-flowcollector"}
- // +optional
- // `staticLabels` is a map of common labels to set on each flow.
- StaticLabels map[string]string `json:"staticLabels"`
}
// FlowCollectorConsolePlugin defines the desired ConsolePlugin state of FlowCollector
@@ -677,23 +606,11 @@ type FlowCollectorConsolePlugin struct {
// `spec.Loki.enable` must also be `true`
Enable *bool `json:"enable,omitempty"`
- //+kubebuilder:default:=true
- // `register` allows, when set to `true`, to automatically register the provided console plugin with the OpenShift Console operator.
- // When set to `false`, you can still register it manually by editing console.operator.openshift.io/cluster with the following command:
- // `oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]'`
- Register *bool `json:"register,omitempty"`
-
//+kubebuilder:validation:Minimum=0
//+kubebuilder:default:=1
// `replicas` defines the number of replicas (pods) to start.
Replicas *int32 `json:"replicas,omitempty"`
- //+kubebuilder:validation:Minimum=1
- //+kubebuilder:validation:Maximum=65535
- //+kubebuilder:default:=9001
- // `port` is the plugin service port. Do not use 9002, which is reserved for metrics.
- Port int32 `json:"port,omitempty"`
-
//+kubebuilder:validation:Enum=IfNotPresent;Always;Never
//+kubebuilder:default:=IfNotPresent
// `imagePullPolicy` is the Kubernetes pull policy for the image defined above
@@ -722,6 +639,12 @@ type FlowCollectorConsolePlugin struct {
// +optional
// `quickFilters` configures quick filter presets for the Console plugin
QuickFilters []QuickFilter `json:"quickFilters"`
+
+ // `debug` allows setting some aspects of the internal configuration of the console plugin.
+ // This section is aimed exclusively for debugging and fine-grained performance optimizations,
+ // such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk.
+ // +optional
+ Debug DebugPluginConfig `json:"debug,omitempty"`
}
// Configuration of the port to service name translation feature of the console plugin
@@ -864,15 +787,127 @@ type SASLConfig struct {
ClientSecretReference FileReference `json:"clientSecretReference,omitempty"`
}
-// `DebugConfig` allows tweaking some aspects of the internal configuration of the agent and FLP.
+// `DebugAgentConfig` allows tweaking some aspects of the internal configuration of the agent.
+// They are aimed exclusively for debugging. Users setting these values do it at their own risk.
+type DebugAgentConfig struct {
+ // `env` allows passing custom environment variables to underlying components. Useful for passing
+ // some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be
+ // publicly exposed as part of the FlowCollector descriptor, as they are only useful
+ // in edge debug or support scenarios.
+ //+optional
+ Env map[string]string `json:"env,omitempty"`
+}
+
+// `DebugProcessorConfig` allows tweaking some aspects of the internal configuration of the processor.
// They are aimed exclusively for debugging. Users setting these values do it at their own risk.
-type DebugConfig struct {
+type DebugProcessorConfig struct {
// `env` allows passing custom environment variables to underlying components. Useful for passing
// some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be
// publicly exposed as part of the FlowCollector descriptor, as they are only useful
// in edge debug or support scenarios.
//+optional
Env map[string]string `json:"env,omitempty"`
+
+ //+kubebuilder:validation:Minimum=1025
+ //+kubebuilder:validation:Maximum=65535
+ //+kubebuilder:default:=2055
+ // Port of the flow collector (host port).
+ // By convention, some values are forbidden. It must be greater than 1024 and different from
+ // 4500, 4789 and 6081.
+ Port int32 `json:"port,omitempty"`
+
+ //+kubebuilder:validation:Minimum=1
+ //+kubebuilder:validation:Maximum=65535
+ //+kubebuilder:default:=8080
+ // `healthPort` is a collector HTTP port in the Pod that exposes the health check API
+ HealthPort int32 `json:"healthPort,omitempty"`
+
+ //+kubebuilder:validation:Minimum=0
+ //+kubebuilder:validation:Maximum=65535
+ //+optional
+ // `profilePort` allows setting up a Go pprof profiler listening to this port
+ ProfilePort int32 `json:"profilePort,omitempty"`
+
+ //+kubebuilder:default:=true
+ // `enableKubeProbes` is a flag to enable or disable Kubernetes liveness and readiness probes
+ EnableKubeProbes *bool `json:"enableKubeProbes,omitempty"`
+
+ //+kubebuilder:default:=true
+ // `dropUnusedFields` allows, when set to `true`, to drop fields that are known to be unused by OVS, to save storage space.
+ DropUnusedFields *bool `json:"dropUnusedFields,omitempty"`
+
+ //+kubebuilder:default:="30s"
+ // +optional
+ // `conversationHeartbeatInterval` is the time to wait between "tick" events of a conversation
+ ConversationHeartbeatInterval *metav1.Duration `json:"conversationHeartbeatInterval,omitempty"`
+
+ //+kubebuilder:default:="10s"
+ // +optional
+ // `conversationEndTimeout` is the time to wait after a network flow is received, to consider the conversation ended.
+ // This delay is ignored when a FIN packet is collected for TCP flows (see `conversationTerminatingTimeout` instead).
+ ConversationEndTimeout *metav1.Duration `json:"conversationEndTimeout,omitempty"`
+
+ //+kubebuilder:default:="5s"
+ // +optional
+ // `conversationTerminatingTimeout` is the time to wait from detected FIN flag to end a conversation. Only relevant for TCP flows.
+ ConversationTerminatingTimeout *metav1.Duration `json:"conversationTerminatingTimeout,omitempty"`
+
+ //+kubebuilder:default:="1s"
+ // `batchWait` is the maximum time to wait before sending a batch.
+ BatchWait *metav1.Duration `json:"batchWait,omitempty"` // Warning: keep as pointer, else default is ignored
+
+ //+kubebuilder:validation:Minimum=1
+ //+kubebuilder:default:=102400
+ // `batchSize` is the maximum batch size (in bytes) of logs to accumulate before sending.
+ BatchSize int64 `json:"batchSize,omitempty"`
+
+ //+kubebuilder:default="1s"
+ // `minBackoff` is the initial backoff time for client connection between retries.
+ MinBackoff *metav1.Duration `json:"minBackoff,omitempty"` // Warning: keep as pointer, else default is ignored
+
+ //+kubebuilder:default="5s"
+ // `maxBackoff` is the maximum backoff time for client connection between retries.
+ MaxBackoff *metav1.Duration `json:"maxBackoff,omitempty"` // Warning: keep as pointer, else default is ignored
+
+ //+kubebuilder:validation:Minimum=0
+ //+kubebuilder:default:=2
+ // `maxRetries` is the maximum number of retries for client connections.
+ MaxRetries *int32 `json:"maxRetries,omitempty"`
+
+ //+kubebuilder:default:={"app":"netobserv-flowcollector"}
+ // +optional
+ // `staticLabels` is a map of common labels to set on each flow.
+ StaticLabels map[string]string `json:"staticLabels"`
+}
+
+// `DebugPluginConfig` allows tweaking some aspects of the internal configuration of the console plugin.
+// They are aimed exclusively for debugging. Users setting these values do it at their own risk.
+type DebugPluginConfig struct {
+ // `env` allows passing custom environment variables to underlying components. Useful for passing
+ // some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be
+ // publicly exposed as part of the FlowCollector descriptor, as they are only useful
+ // in edge debug or support scenarios.
+ //+optional
+ Env map[string]string `json:"env,omitempty"`
+
+ // `args` allows passing custom arguments to underlying components. Useful for overriding
+ // some parameters, such as an url or a configuration path, that should not be
+ // publicly exposed as part of the FlowCollector descriptor, as they are only useful
+ // in edge debug or support scenarios.
+ //+optional
+ Args []string `json:"args,omitempty"`
+
+ //+kubebuilder:default:=true
+ // `register` allows, when set to `true`, to automatically register the provided console plugin with the OpenShift Console operator.
+ // When set to `false`, you can still register it manually by editing console.operator.openshift.io/cluster with the following command:
+ // `oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]'`
+ Register *bool `json:"register,omitempty"`
+
+ //+kubebuilder:validation:Minimum=1
+ //+kubebuilder:validation:Maximum=65535
+ //+kubebuilder:default:=9001
+ // `port` is the plugin service port. Do not use 9002, which is reserved for metrics.
+ Port int32 `json:"port,omitempty"`
}
// Add more exporter types below
diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go
index c8853b977..1e8c1dec7 100644
--- a/api/v1beta2/zz_generated.deepcopy.go
+++ b/api/v1beta2/zz_generated.deepcopy.go
@@ -102,7 +102,7 @@ func (in *ConsolePluginPortConfig) DeepCopy() *ConsolePluginPortConfig {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DebugConfig) DeepCopyInto(out *DebugConfig) {
+func (in *DebugAgentConfig) DeepCopyInto(out *DebugAgentConfig) {
*out = *in
if in.Env != nil {
in, out := &in.Env, &out.Env
@@ -113,12 +113,118 @@ func (in *DebugConfig) DeepCopyInto(out *DebugConfig) {
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebugConfig.
-func (in *DebugConfig) DeepCopy() *DebugConfig {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebugAgentConfig.
+func (in *DebugAgentConfig) DeepCopy() *DebugAgentConfig {
if in == nil {
return nil
}
- out := new(DebugConfig)
+ out := new(DebugAgentConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DebugPluginConfig) DeepCopyInto(out *DebugPluginConfig) {
+ *out = *in
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Args != nil {
+ in, out := &in.Args, &out.Args
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Register != nil {
+ in, out := &in.Register, &out.Register
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebugPluginConfig.
+func (in *DebugPluginConfig) DeepCopy() *DebugPluginConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DebugPluginConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DebugProcessorConfig) DeepCopyInto(out *DebugProcessorConfig) {
+ *out = *in
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.EnableKubeProbes != nil {
+ in, out := &in.EnableKubeProbes, &out.EnableKubeProbes
+ *out = new(bool)
+ **out = **in
+ }
+ if in.DropUnusedFields != nil {
+ in, out := &in.DropUnusedFields, &out.DropUnusedFields
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ConversationHeartbeatInterval != nil {
+ in, out := &in.ConversationHeartbeatInterval, &out.ConversationHeartbeatInterval
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ if in.ConversationEndTimeout != nil {
+ in, out := &in.ConversationEndTimeout, &out.ConversationEndTimeout
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ if in.ConversationTerminatingTimeout != nil {
+ in, out := &in.ConversationTerminatingTimeout, &out.ConversationTerminatingTimeout
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ if in.BatchWait != nil {
+ in, out := &in.BatchWait, &out.BatchWait
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ if in.MinBackoff != nil {
+ in, out := &in.MinBackoff, &out.MinBackoff
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ if in.MaxBackoff != nil {
+ in, out := &in.MaxBackoff, &out.MaxBackoff
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ if in.MaxRetries != nil {
+ in, out := &in.MaxRetries, &out.MaxRetries
+ *out = new(int32)
+ **out = **in
+ }
+ if in.StaticLabels != nil {
+ in, out := &in.StaticLabels, &out.StaticLabels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebugProcessorConfig.
+func (in *DebugProcessorConfig) DeepCopy() *DebugProcessorConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DebugProcessorConfig)
in.DeepCopyInto(out)
return out
}
@@ -216,11 +322,6 @@ func (in *FlowCollectorConsolePlugin) DeepCopyInto(out *FlowCollectorConsolePlug
*out = new(bool)
**out = **in
}
- if in.Register != nil {
- in, out := &in.Register, &out.Register
- *out = new(bool)
- **out = **in
- }
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
@@ -236,6 +337,7 @@ func (in *FlowCollectorConsolePlugin) DeepCopyInto(out *FlowCollectorConsolePlug
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ in.Debug.DeepCopyInto(&out.Debug)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorConsolePlugin.
@@ -307,16 +409,6 @@ func (in *FlowCollectorFLP) DeepCopyInto(out *FlowCollectorFLP) {
*out = *in
in.Metrics.DeepCopyInto(&out.Metrics)
in.Resources.DeepCopyInto(&out.Resources)
- if in.EnableKubeProbes != nil {
- in, out := &in.EnableKubeProbes, &out.EnableKubeProbes
- *out = new(bool)
- **out = **in
- }
- if in.DropUnusedFields != nil {
- in, out := &in.DropUnusedFields, &out.DropUnusedFields
- *out = new(bool)
- **out = **in
- }
if in.KafkaConsumerReplicas != nil {
in, out := &in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas
*out = new(int32)
@@ -328,21 +420,6 @@ func (in *FlowCollectorFLP) DeepCopyInto(out *FlowCollectorFLP) {
*out = new(string)
**out = **in
}
- if in.ConversationHeartbeatInterval != nil {
- in, out := &in.ConversationHeartbeatInterval, &out.ConversationHeartbeatInterval
- *out = new(v1.Duration)
- **out = **in
- }
- if in.ConversationEndTimeout != nil {
- in, out := &in.ConversationEndTimeout, &out.ConversationEndTimeout
- *out = new(v1.Duration)
- **out = **in
- }
- if in.ConversationTerminatingTimeout != nil {
- in, out := &in.ConversationTerminatingTimeout, &out.ConversationTerminatingTimeout
- *out = new(v1.Duration)
- **out = **in
- }
in.Debug.DeepCopyInto(&out.Debug)
}
@@ -488,38 +565,11 @@ func (in *FlowCollectorLoki) DeepCopyInto(out *FlowCollectorLoki) {
*out = new(bool)
**out = **in
}
- if in.BatchWait != nil {
- in, out := &in.BatchWait, &out.BatchWait
- *out = new(v1.Duration)
- **out = **in
- }
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(v1.Duration)
**out = **in
}
- if in.MinBackoff != nil {
- in, out := &in.MinBackoff, &out.MinBackoff
- *out = new(v1.Duration)
- **out = **in
- }
- if in.MaxBackoff != nil {
- in, out := &in.MaxBackoff, &out.MaxBackoff
- *out = new(v1.Duration)
- **out = **in
- }
- if in.MaxRetries != nil {
- in, out := &in.MaxRetries, &out.MaxRetries
- *out = new(int32)
- **out = **in
- }
- if in.StaticLabels != nil {
- in, out := &in.StaticLabels, &out.StaticLabels
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorLoki.
diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
index 788ef5fe7..d244362ab 100644
--- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
+++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
@@ -5776,6 +5776,51 @@ spec:
- ENABLED
type: string
type: object
+ debug:
+ description: '`debug` allows setting some aspects of the internal
+ configuration of the console plugin. This section is aimed exclusively
+ for debugging and fine-grained performance optimizations, such
+ as `GOGC` and `GOMAXPROCS` env vars. Users setting its values
+ do it at their own risk.'
+ properties:
+ args:
+ description: '`args` allows passing custom arguments to underlying
+ components. Useful for overriding some parameters, such
+ as a URL or a configuration path, that should not be publicly
+ exposed as part of the FlowCollector descriptor, as they
+ are only useful in edge debug or support scenarios.'
+ items:
+ type: string
+ type: array
+ env:
+ additionalProperties:
+ type: string
+ description: '`env` allows passing custom environment variables
+ to underlying components. Useful for passing some very concrete
+ performance-tuning options, such as `GOGC` and `GOMAXPROCS`,
+ that should not be publicly exposed as part of the FlowCollector
+ descriptor, as they are only useful in edge debug or support
+ scenarios.'
+ type: object
+ port:
+ default: 9001
+ description: '`port` is the plugin service port. Do not use
+ 9002, which is reserved for metrics.'
+ format: int32
+ maximum: 65535
+ minimum: 1
+ type: integer
+ register:
+ default: true
+ description: '`register` allows, when set to `true`, to automatically
+ register the provided console plugin with the OpenShift
+ Console operator. When set to `false`, you can still register
+ it manually by editing console.operator.openshift.io/cluster
+ with the following command: `oc patch console.operator.openshift.io
+ cluster --type=''json'' -p ''[{"op": "add", "path": "/spec/plugins/-",
+ "value": "netobserv-plugin"}]''`'
+ type: boolean
+ type: object
enable:
default: true
description: Enables the console plugin deployment. `spec.Loki.enable`
@@ -5802,14 +5847,6 @@ spec:
- fatal
- panic
type: string
- port:
- default: 9001
- description: '`port` is the plugin service port. Do not use 9002,
- which is reserved for metrics.'
- format: int32
- maximum: 65535
- minimum: 1
- type: integer
portNaming:
default:
enable: true
@@ -5874,15 +5911,6 @@ spec:
- name
type: object
type: array
- register:
- default: true
- description: '`register` allows, when set to `true`, to automatically
- register the provided console plugin with the OpenShift Console
- operator. When set to `false`, you can still register it manually
- by editing console.operator.openshift.io/cluster with the following
- command: `oc patch console.operator.openshift.io cluster --type=''json''
- -p ''[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''`'
- type: boolean
replicas:
default: 1
description: '`replicas` defines the number of replicas (pods)
@@ -6346,18 +6374,6 @@ spec:
loki:
description: '`loki`, the flow store, client settings.'
properties:
- batchSize:
- default: 102400
- description: '`batchSize` is the maximum batch size (in bytes)
- of logs to accumulate before sending.'
- format: int64
- minimum: 1
- type: integer
- batchWait:
- default: 1s
- description: '`batchWait` is the maximum time to wait before sending
- a batch.'
- type: string
distributed:
description: Loki configuration for DISTRIBUTED mode. This is
usefull for an easy microservices loki config. It will be ignored
@@ -6692,23 +6708,6 @@ spec:
type: object
type: object
type: object
- maxBackoff:
- default: 5s
- description: '`maxBackoff` is the maximum backoff time for client
- connection between retries.'
- type: string
- maxRetries:
- default: 2
- description: '`maxRetries` is the maximum number of retries for
- client connections.'
- format: int32
- minimum: 0
- type: integer
- minBackoff:
- default: 1s
- description: '`minBackoff` is the initial backoff time for client
- connection between retries.'
- type: string
mode:
default: MANUAL
enum:
@@ -6811,14 +6810,6 @@ spec:
service that point both ingester and querier.'
type: string
type: object
- staticLabels:
- additionalProperties:
- type: string
- default:
- app: netobserv-flowcollector
- description: '`staticLabels` is a map of common labels to set
- on each flow.'
- type: object
timeout:
default: 10s
description: '`timeout` is the maximum time connection / request
@@ -6841,24 +6832,6 @@ spec:
in the flows data. This is useful in a multi-cluster context.
When using OpenShift, leave empty to make it automatically determined.'
type: string
- conversationEndTimeout:
- default: 10s
- description: '`conversationEndTimeout` is the time to wait after
- a network flow is received, to consider the conversation ended.
- This delay is ignored when a FIN packet is collected for TCP
- flows (see `conversationTerminatingTimeout` instead).'
- type: string
- conversationHeartbeatInterval:
- default: 30s
- description: '`conversationHeartbeatInterval` is the time to wait
- between "tick" events of a conversation'
- type: string
- conversationTerminatingTimeout:
- default: 5s
- description: '`conversationTerminatingTimeout` is the time to
- wait from detected FIN flag to end a conversation. Only relevant
- for TCP flows.'
- type: string
debug:
description: '`debug` allows setting some aspects of the internal
configuration of the flow processor. This section is aimed exclusively
@@ -6866,6 +6839,47 @@ spec:
as `GOGC` and `GOMAXPROCS` env vars. Users setting its values
do it at their own risk.'
properties:
+ batchSize:
+ default: 102400
+ description: '`batchSize` is the maximum batch size (in bytes)
+ of logs to accumulate before sending.'
+ format: int64
+ minimum: 1
+ type: integer
+ batchWait:
+ default: 1s
+ description: '`batchWait` is the maximum time to wait before
+ sending a batch.'
+ type: string
+ conversationEndTimeout:
+ default: 10s
+ description: '`conversationEndTimeout` is the time to wait
+ after a network flow is received, to consider the conversation
+ ended. This delay is ignored when a FIN packet is collected
+ for TCP flows (see `conversationTerminatingTimeout` instead).'
+ type: string
+ conversationHeartbeatInterval:
+ default: 30s
+ description: '`conversationHeartbeatInterval` is the time
+ to wait between "tick" events of a conversation'
+ type: string
+ conversationTerminatingTimeout:
+ default: 5s
+ description: '`conversationTerminatingTimeout` is the time
+ to wait from detected FIN flag to end a conversation. Only
+ relevant for TCP flows.'
+ type: string
+ dropUnusedFields:
+ default: true
+ description: '`dropUnusedFields` allows, when set to `true`,
+ to drop fields that are known to be unused by OVS, to save
+ storage space.'
+ type: boolean
+ enableKubeProbes:
+ default: true
+ description: '`enableKubeProbes` is a flag to enable or disable
+ Kubernetes liveness and readiness probes'
+ type: boolean
env:
additionalProperties:
type: string
@@ -6876,26 +6890,56 @@ spec:
descriptor, as they are only useful in edge debug or support
scenarios.'
type: object
+ healthPort:
+ default: 8080
+ description: '`healthPort` is a collector HTTP port in the
+ Pod that exposes the health check API'
+ format: int32
+ maximum: 65535
+ minimum: 1
+ type: integer
+ maxBackoff:
+ default: 5s
+ description: '`maxBackoff` is the maximum backoff time for
+ client connection between retries.'
+ type: string
+ maxRetries:
+ default: 2
+ description: '`maxRetries` is the maximum number of retries
+ for client connections.'
+ format: int32
+ minimum: 0
+ type: integer
+ minBackoff:
+ default: 1s
+ description: '`minBackoff` is the initial backoff time for
+ client connection between retries.'
+ type: string
+ port:
+ default: 2055
+ description: Port of the flow collector (host port). By convention,
+ some values are forbidden. It must be greater than 1024
+ and different from 4500, 4789 and 6081.
+ format: int32
+ maximum: 65535
+ minimum: 1025
+ type: integer
+ profilePort:
+ description: '`profilePort` allows setting up a Go pprof profiler
+ listening to this port'
+ format: int32
+ maximum: 65535
+ minimum: 0
+ type: integer
+ staticLabels:
+ additionalProperties:
+ type: string
+ default:
+ app: netobserv-flowcollector
+ description: '`staticLabels` is a map of common labels to
+ set on each flow.'
+ type: object
type: object
- dropUnusedFields:
- default: true
- description: '`dropUnusedFields` allows, when set to `true`, to
- drop fields that are known to be unused by OVS, to save storage
- space.'
- type: boolean
- enableKubeProbes:
- default: true
- description: '`enableKubeProbes` is a flag to enable or disable
- Kubernetes liveness and readiness probes'
- type: boolean
- healthPort:
- default: 8080
- description: '`healthPort` is a collector HTTP port in the Pod
- that exposes the health check API'
- format: int32
- maximum: 65535
- minimum: 1
- type: integer
imagePullPolicy:
default: IfNotPresent
description: '`imagePullPolicy` is the Kubernetes pull policy
@@ -7625,22 +7669,6 @@ spec:
type: object
type: object
type: object
- port:
- default: 2055
- description: Port of the flow collector (host port). By convention,
- some values are forbidden. It must be greater than 1024 and
- different from 4500, 4789 and 6081.
- format: int32
- maximum: 65535
- minimum: 1025
- type: integer
- profilePort:
- description: '`profilePort` allows setting up a Go pprof profiler
- listening to this port'
- format: int32
- maximum: 65535
- minimum: 0
- type: integer
resources:
default:
limits:
diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
index ec6b086e8..071dc12f3 100644
--- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
+++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
@@ -5762,6 +5762,51 @@ spec:
- ENABLED
type: string
type: object
+ debug:
+ description: '`debug` allows setting some aspects of the internal
+ configuration of the console plugin. This section is aimed exclusively
+ for debugging and fine-grained performance optimizations, such
+ as `GOGC` and `GOMAXPROCS` env vars. Users setting its values
+ do it at their own risk.'
+ properties:
+ args:
+ description: '`args` allows passing custom arguments to underlying
+ components. Useful for overriding some parameters, such
+ as a URL or a configuration path, that should not be publicly
+ exposed as part of the FlowCollector descriptor, as they
+ are only useful in edge debug or support scenarios.'
+ items:
+ type: string
+ type: array
+ env:
+ additionalProperties:
+ type: string
+ description: '`env` allows passing custom environment variables
+ to underlying components. Useful for passing some very concrete
+ performance-tuning options, such as `GOGC` and `GOMAXPROCS`,
+ that should not be publicly exposed as part of the FlowCollector
+ descriptor, as they are only useful in edge debug or support
+ scenarios.'
+ type: object
+ port:
+ default: 9001
+ description: '`port` is the plugin service port. Do not use
+ 9002, which is reserved for metrics.'
+ format: int32
+ maximum: 65535
+ minimum: 1
+ type: integer
+ register:
+ default: true
+ description: '`register` allows, when set to `true`, to automatically
+ register the provided console plugin with the OpenShift
+ Console operator. When set to `false`, you can still register
+ it manually by editing console.operator.openshift.io/cluster
+ with the following command: `oc patch console.operator.openshift.io
+ cluster --type=''json'' -p ''[{"op": "add", "path": "/spec/plugins/-",
+ "value": "netobserv-plugin"}]''`'
+ type: boolean
+ type: object
enable:
default: true
description: Enables the console plugin deployment. `spec.Loki.enable`
@@ -5788,14 +5833,6 @@ spec:
- fatal
- panic
type: string
- port:
- default: 9001
- description: '`port` is the plugin service port. Do not use 9002,
- which is reserved for metrics.'
- format: int32
- maximum: 65535
- minimum: 1
- type: integer
portNaming:
default:
enable: true
@@ -5860,15 +5897,6 @@ spec:
- name
type: object
type: array
- register:
- default: true
- description: '`register` allows, when set to `true`, to automatically
- register the provided console plugin with the OpenShift Console
- operator. When set to `false`, you can still register it manually
- by editing console.operator.openshift.io/cluster with the following
- command: `oc patch console.operator.openshift.io cluster --type=''json''
- -p ''[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''`'
- type: boolean
replicas:
default: 1
description: '`replicas` defines the number of replicas (pods)
@@ -6332,18 +6360,6 @@ spec:
loki:
description: '`loki`, the flow store, client settings.'
properties:
- batchSize:
- default: 102400
- description: '`batchSize` is the maximum batch size (in bytes)
- of logs to accumulate before sending.'
- format: int64
- minimum: 1
- type: integer
- batchWait:
- default: 1s
- description: '`batchWait` is the maximum time to wait before sending
- a batch.'
- type: string
distributed:
description: Loki configuration for DISTRIBUTED mode. This is
usefull for an easy microservices loki config. It will be ignored
@@ -6678,23 +6694,6 @@ spec:
type: object
type: object
type: object
- maxBackoff:
- default: 5s
- description: '`maxBackoff` is the maximum backoff time for client
- connection between retries.'
- type: string
- maxRetries:
- default: 2
- description: '`maxRetries` is the maximum number of retries for
- client connections.'
- format: int32
- minimum: 0
- type: integer
- minBackoff:
- default: 1s
- description: '`minBackoff` is the initial backoff time for client
- connection between retries.'
- type: string
mode:
default: MANUAL
enum:
@@ -6797,14 +6796,6 @@ spec:
service that point both ingester and querier.'
type: string
type: object
- staticLabels:
- additionalProperties:
- type: string
- default:
- app: netobserv-flowcollector
- description: '`staticLabels` is a map of common labels to set
- on each flow.'
- type: object
timeout:
default: 10s
description: '`timeout` is the maximum time connection / request
@@ -6827,24 +6818,6 @@ spec:
in the flows data. This is useful in a multi-cluster context.
When using OpenShift, leave empty to make it automatically determined.'
type: string
- conversationEndTimeout:
- default: 10s
- description: '`conversationEndTimeout` is the time to wait after
- a network flow is received, to consider the conversation ended.
- This delay is ignored when a FIN packet is collected for TCP
- flows (see `conversationTerminatingTimeout` instead).'
- type: string
- conversationHeartbeatInterval:
- default: 30s
- description: '`conversationHeartbeatInterval` is the time to wait
- between "tick" events of a conversation'
- type: string
- conversationTerminatingTimeout:
- default: 5s
- description: '`conversationTerminatingTimeout` is the time to
- wait from detected FIN flag to end a conversation. Only relevant
- for TCP flows.'
- type: string
debug:
description: '`debug` allows setting some aspects of the internal
configuration of the flow processor. This section is aimed exclusively
@@ -6852,6 +6825,47 @@ spec:
as `GOGC` and `GOMAXPROCS` env vars. Users setting its values
do it at their own risk.'
properties:
+ batchSize:
+ default: 102400
+ description: '`batchSize` is the maximum batch size (in bytes)
+ of logs to accumulate before sending.'
+ format: int64
+ minimum: 1
+ type: integer
+ batchWait:
+ default: 1s
+ description: '`batchWait` is the maximum time to wait before
+ sending a batch.'
+ type: string
+ conversationEndTimeout:
+ default: 10s
+ description: '`conversationEndTimeout` is the time to wait
+ after a network flow is received, to consider the conversation
+ ended. This delay is ignored when a FIN packet is collected
+ for TCP flows (see `conversationTerminatingTimeout` instead).'
+ type: string
+ conversationHeartbeatInterval:
+ default: 30s
+ description: '`conversationHeartbeatInterval` is the time
+ to wait between "tick" events of a conversation'
+ type: string
+ conversationTerminatingTimeout:
+ default: 5s
+ description: '`conversationTerminatingTimeout` is the time
+ to wait from detected FIN flag to end a conversation. Only
+ relevant for TCP flows.'
+ type: string
+ dropUnusedFields:
+ default: true
+ description: '`dropUnusedFields` allows, when set to `true`,
+ to drop fields that are known to be unused by OVS, to save
+ storage space.'
+ type: boolean
+ enableKubeProbes:
+ default: true
+ description: '`enableKubeProbes` is a flag to enable or disable
+ Kubernetes liveness and readiness probes'
+ type: boolean
env:
additionalProperties:
type: string
@@ -6862,26 +6876,56 @@ spec:
descriptor, as they are only useful in edge debug or support
scenarios.'
type: object
+ healthPort:
+ default: 8080
+ description: '`healthPort` is a collector HTTP port in the
+ Pod that exposes the health check API'
+ format: int32
+ maximum: 65535
+ minimum: 1
+ type: integer
+ maxBackoff:
+ default: 5s
+ description: '`maxBackoff` is the maximum backoff time for
+ client connection between retries.'
+ type: string
+ maxRetries:
+ default: 2
+ description: '`maxRetries` is the maximum number of retries
+ for client connections.'
+ format: int32
+ minimum: 0
+ type: integer
+ minBackoff:
+ default: 1s
+ description: '`minBackoff` is the initial backoff time for
+ client connection between retries.'
+ type: string
+ port:
+ default: 2055
+ description: Port of the flow collector (host port). By convention,
+ some values are forbidden. It must be greater than 1024
+ and different from 4500, 4789 and 6081.
+ format: int32
+ maximum: 65535
+ minimum: 1025
+ type: integer
+ profilePort:
+ description: '`profilePort` allows setting up a Go pprof profiler
+ listening to this port'
+ format: int32
+ maximum: 65535
+ minimum: 0
+ type: integer
+ staticLabels:
+ additionalProperties:
+ type: string
+ default:
+ app: netobserv-flowcollector
+ description: '`staticLabels` is a map of common labels to
+ set on each flow.'
+ type: object
type: object
- dropUnusedFields:
- default: true
- description: '`dropUnusedFields` allows, when set to `true`, to
- drop fields that are known to be unused by OVS, to save storage
- space.'
- type: boolean
- enableKubeProbes:
- default: true
- description: '`enableKubeProbes` is a flag to enable or disable
- Kubernetes liveness and readiness probes'
- type: boolean
- healthPort:
- default: 8080
- description: '`healthPort` is a collector HTTP port in the Pod
- that exposes the health check API'
- format: int32
- maximum: 65535
- minimum: 1
- type: integer
imagePullPolicy:
default: IfNotPresent
description: '`imagePullPolicy` is the Kubernetes pull policy
@@ -7611,22 +7655,6 @@ spec:
type: object
type: object
type: object
- port:
- default: 2055
- description: Port of the flow collector (host port). By convention,
- some values are forbidden. It must be greater than 1024 and
- different from 4500, 4789 and 6081.
- format: int32
- maximum: 65535
- minimum: 1025
- type: integer
- profilePort:
- description: '`profilePort` allows setting up a Go pprof profiler
- listening to this port'
- format: int32
- maximum: 65535
- minimum: 0
- type: integer
resources:
default:
limits:
diff --git a/controllers/consoleplugin/consoleplugin_objects.go b/controllers/consoleplugin/consoleplugin_objects.go
index 7797eaeef..0d98fdbdb 100644
--- a/controllers/consoleplugin/consoleplugin_objects.go
+++ b/controllers/consoleplugin/consoleplugin_objects.go
@@ -70,7 +70,7 @@ func (b *builder) consolePlugin() *osv1alpha1.ConsolePlugin {
Service: osv1alpha1.ConsolePluginService{
Name: constants.PluginName,
Namespace: b.namespace,
- Port: b.desired.ConsolePlugin.Port,
+ Port: b.desired.ConsolePlugin.Debug.Port,
BasePath: "/",
},
Proxy: []osv1alpha1.ConsolePluginProxy{{
@@ -80,7 +80,7 @@ func (b *builder) consolePlugin() *osv1alpha1.ConsolePlugin {
Service: osv1alpha1.ConsolePluginProxyServiceConfig{
Name: constants.PluginName,
Namespace: b.namespace,
- Port: b.desired.ConsolePlugin.Port,
+ Port: b.desired.ConsolePlugin.Debug.Port,
},
}},
},
@@ -255,6 +255,17 @@ func (b *builder) podTemplate(cmDigest string) *corev1.PodTemplateSpec {
}
args := b.buildArgs(b.desired)
+ // append debug arguments if provided
+ if len(b.desired.ConsolePlugin.Debug.Args) > 0 {
+ args = append(args, b.desired.ConsolePlugin.Debug.Args...)
+ }
+
+ var envs []corev1.EnvVar
+ // we need to sort env map to keep idempotency,
+ // as equal maps could be iterated in different order
+ for _, pair := range helper.KeySorted(b.desired.ConsolePlugin.Debug.Env) {
+ envs = append(envs, corev1.EnvVar{Name: pair[0], Value: pair[1]})
+ }
return &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@@ -271,6 +282,7 @@ func (b *builder) podTemplate(cmDigest string) *corev1.PodTemplateSpec {
Resources: *b.desired.ConsolePlugin.Resources.DeepCopy(),
VolumeMounts: b.volumes.AppendMounts(volumeMounts),
Args: args,
+ Env: envs,
}},
Volumes: b.volumes.AppendVolumes(volumes),
ServiceAccountName: constants.PluginName,
@@ -311,12 +323,12 @@ func (b *builder) mainService() *corev1.Service {
Spec: corev1.ServiceSpec{
Selector: b.selector,
Ports: []corev1.ServicePort{{
- Port: b.desired.ConsolePlugin.Port,
+ Port: b.desired.ConsolePlugin.Debug.Port,
Protocol: corev1.ProtocolTCP,
// Some Kubernetes versions might automatically set TargetPort to Port. We need to
// explicitly set it here so the reconcile loop verifies that the owned service
// is equal as the desired service
- TargetPort: intstr.FromInt(int(b.desired.ConsolePlugin.Port)),
+ TargetPort: intstr.FromInt(int(b.desired.ConsolePlugin.Debug.Port)),
}},
},
}
diff --git a/controllers/consoleplugin/consoleplugin_reconciler.go b/controllers/consoleplugin/consoleplugin_reconciler.go
index a2d8cddcd..a8123aa8d 100644
--- a/controllers/consoleplugin/consoleplugin_reconciler.go
+++ b/controllers/consoleplugin/consoleplugin_reconciler.go
@@ -130,7 +130,7 @@ func (r *CPReconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowC
func (r *CPReconciler) checkAutoPatch(ctx context.Context, desired *flowslatest.FlowCollector) error {
console := operatorsv1.Console{}
- reg := helper.UseConsolePlugin(&desired.Spec) && helper.PtrBool(desired.Spec.ConsolePlugin.Register)
+ reg := helper.UseConsolePlugin(&desired.Spec) && helper.PtrBool(desired.Spec.ConsolePlugin.Debug.Register)
if err := r.Client.Get(ctx, types.NamespacedName{Name: "cluster"}, &console); err != nil {
// Console operator CR not found => warn but continue execution
if reg {
@@ -265,5 +265,5 @@ func (r *CPReconciler) reconcileHPA(ctx context.Context, builder *builder, desir
}
func pluginNeedsUpdate(plg *osv1alpha1.ConsolePlugin, desired *pluginSpec) bool {
- return plg.Spec.Service.Port != desired.Port
+ return plg.Spec.Service.Port != desired.Debug.Port
}
diff --git a/controllers/consoleplugin/consoleplugin_test.go b/controllers/consoleplugin/consoleplugin_test.go
index bc70cb266..25d659167 100644
--- a/controllers/consoleplugin/consoleplugin_test.go
+++ b/controllers/consoleplugin/consoleplugin_test.go
@@ -31,7 +31,6 @@ var testResources = corev1.ResourceRequirements{
func getPluginConfig() flowslatest.FlowCollectorConsolePlugin {
return flowslatest.FlowCollectorConsolePlugin{
Enable: ptr.To(true),
- Port: 9001,
ImagePullPolicy: string(testPullPolicy),
Resources: testResources,
Autoscaler: flowslatest.FlowCollectorHPA{
diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go
index 072157ff4..fb9a8265e 100644
--- a/controllers/ebpf/agent_controller.go
+++ b/controllers/ebpf/agent_controller.go
@@ -337,7 +337,7 @@ func (c *AgentController) envConfig(ctx context.Context, coll *flowslatest.FlowC
},
}, corev1.EnvVar{
Name: envFlowsTargetPort,
- Value: strconv.Itoa(int(coll.Spec.Processor.Port)),
+ Value: strconv.Itoa(int(coll.Spec.Processor.Debug.Port)),
})
}
return config, nil
diff --git a/controllers/flowcollector_controller_console_test.go b/controllers/flowcollector_controller_console_test.go
index 30f37b1c2..12656d13f 100644
--- a/controllers/flowcollector_controller_console_test.go
+++ b/controllers/flowcollector_controller_console_test.go
@@ -77,9 +77,7 @@ func flowCollectorConsolePluginSpecs() {
Agent: flowslatest.FlowCollectorAgent{Type: "IPFIX"},
ConsolePlugin: flowslatest.FlowCollectorConsolePlugin{
Enable: ptr.To(true),
- Port: 9001,
ImagePullPolicy: "Never",
- Register: ptr.To(true),
Autoscaler: flowslatest.FlowCollectorHPA{
Status: flowslatest.HPAStatusEnabled,
MinReplicas: ptr.To(int32(1)),
@@ -152,7 +150,7 @@ func flowCollectorConsolePluginSpecs() {
if err := k8sClient.Get(ctx, crKey, &fc); err != nil {
return err
}
- fc.Spec.ConsolePlugin.Port = 9099
+ fc.Spec.ConsolePlugin.Debug.Port = 9099
fc.Spec.ConsolePlugin.Replicas = ptr.To(int32(2))
fc.Spec.ConsolePlugin.Autoscaler.Status = flowslatest.HPAStatusDisabled
return k8sClient.Update(ctx, &fc)
@@ -243,7 +241,7 @@ func flowCollectorConsolePluginSpecs() {
It("Should be unregistered", func() {
By("Update CR to unregister")
UpdateCR(crKey, func(fc *flowslatest.FlowCollector) {
- fc.Spec.ConsolePlugin.Register = ptr.To(false)
+ fc.Spec.ConsolePlugin.Debug.Register = ptr.To(false)
})
By("Expecting the Console CR to not have plugin registered")
diff --git a/controllers/flowcollector_controller_ebpf_test.go b/controllers/flowcollector_controller_ebpf_test.go
index 1626aa23a..98518c74f 100644
--- a/controllers/flowcollector_controller_ebpf_test.go
+++ b/controllers/flowcollector_controller_ebpf_test.go
@@ -56,9 +56,11 @@ func flowCollectorEBPFSpecs() {
Namespace: operatorNamespace,
DeploymentModel: flowslatest.DeploymentModelDirect,
Processor: flowslatest.FlowCollectorFLP{
- Port: 9999,
ImagePullPolicy: "Never",
LogLevel: "error",
+ Debug: flowslatest.DebugProcessorConfig{
+ Port: 9999,
+ },
},
Agent: flowslatest.FlowCollectorAgent{
Type: "EBPF",
@@ -69,12 +71,8 @@ func flowCollectorEBPFSpecs() {
Interfaces: []string{"veth0", "/^br-/"},
ExcludeInterfaces: []string{"br-3", "lo"},
LogLevel: "trace",
- Debug: flowslatest.DebugConfig{
- Env: map[string]string{
- // we'll test that multiple variables are reordered
- "GOGC": "400",
- "BUFFERS_LENGTH": "100",
- },
+ Debug: flowslatest.DebugAgentConfig{
+ Env: map[string]string{"GOGC": "400", "BUFFERS_LENGTH": "100"},
},
},
},
diff --git a/controllers/flowcollector_controller_iso_test.go b/controllers/flowcollector_controller_iso_test.go
index f21025205..bcc8a4710 100644
--- a/controllers/flowcollector_controller_iso_test.go
+++ b/controllers/flowcollector_controller_iso_test.go
@@ -40,22 +40,31 @@ func flowCollectorIsoSpecs() {
Namespace: operatorNamespace,
DeploymentModel: flowslatest.DeploymentModelDirect,
Processor: flowslatest.FlowCollectorFLP{
- Port: 12345,
- HealthPort: 12346,
- ProfilePort: 0,
- ImagePullPolicy: "Always",
- LogLevel: "trace",
- Resources: v1.ResourceRequirements{Limits: nil, Requests: nil},
- KafkaConsumerReplicas: &zero,
- KafkaConsumerAutoscaler: flowslatest.FlowCollectorHPA{Status: "DISABLED", MinReplicas: &zero, MaxReplicas: zero, Metrics: []ascv2.MetricSpec{}},
- KafkaConsumerQueueCapacity: int(zero),
- KafkaConsumerBatchSize: int(zero),
- ConversationHeartbeatInterval: &metav1.Duration{Duration: time.Second},
- ConversationEndTimeout: &metav1.Duration{Duration: time.Second},
- ConversationTerminatingTimeout: &metav1.Duration{Duration: time.Second},
- ClusterName: "testCluster",
- Debug: flowslatest.DebugConfig{},
- LogTypes: &outputRecordTypes,
+ ImagePullPolicy: "Always",
+ LogLevel: "trace",
+ Resources: v1.ResourceRequirements{Limits: nil, Requests: nil},
+ KafkaConsumerReplicas: &zero,
+ KafkaConsumerAutoscaler: flowslatest.FlowCollectorHPA{Status: "DISABLED", MinReplicas: &zero, MaxReplicas: zero, Metrics: []ascv2.MetricSpec{}},
+ KafkaConsumerQueueCapacity: int(zero),
+ KafkaConsumerBatchSize: int(zero),
+ ClusterName: "testCluster",
+ Debug: flowslatest.DebugProcessorConfig{
+ Port: 12345,
+ HealthPort: 12346,
+ ProfilePort: 0,
+ ConversationHeartbeatInterval: &metav1.Duration{Duration: time.Second},
+ ConversationEndTimeout: &metav1.Duration{Duration: time.Second},
+ ConversationTerminatingTimeout: &metav1.Duration{Duration: time.Second},
+ EnableKubeProbes: ptr.To(false),
+ DropUnusedFields: ptr.To(false),
+ BatchWait: &metav1.Duration{Duration: time.Second},
+ BatchSize: 100,
+ MinBackoff: &metav1.Duration{Duration: time.Second},
+ MaxBackoff: &metav1.Duration{Duration: time.Second},
+ MaxRetries: &zero,
+ StaticLabels: map[string]string{},
+ },
+ LogTypes: &outputRecordTypes,
Metrics: flowslatest.FLPMetrics{
Server: flowslatest.MetricsServerConfig{
Port: 12347,
@@ -67,8 +76,6 @@ func flowCollectorIsoSpecs() {
IgnoreTags: []string{},
DisableAlerts: []flowslatest.FLPAlert{},
},
- EnableKubeProbes: ptr.To(false),
- DropUnusedFields: ptr.To(false),
},
Agent: flowslatest.FlowCollectorAgent{
Type: "EBPF",
@@ -91,7 +98,7 @@ func flowCollectorIsoSpecs() {
CacheActiveTimeout: "5s",
CacheMaxFlows: 100,
ImagePullPolicy: "Always",
- Debug: flowslatest.DebugConfig{},
+ Debug: flowslatest.DebugAgentConfig{},
LogLevel: "trace",
Resources: v1.ResourceRequirements{Limits: nil, Requests: nil},
Interfaces: []string{},
@@ -103,13 +110,15 @@ func flowCollectorIsoSpecs() {
},
ConsolePlugin: flowslatest.FlowCollectorConsolePlugin{
Enable: ptr.To(true),
- Register: ptr.To(false),
Replicas: &zero,
- Port: 12345,
ImagePullPolicy: "Always",
- Resources: v1.ResourceRequirements{Limits: nil, Requests: nil},
- LogLevel: "trace",
- Autoscaler: flowslatest.FlowCollectorHPA{Status: "DISABLED", MinReplicas: &zero, MaxReplicas: zero, Metrics: []ascv2.MetricSpec{}},
+ Debug: flowslatest.DebugPluginConfig{
+ Register: ptr.To(false),
+ Port: 12345,
+ },
+ Resources: v1.ResourceRequirements{Limits: nil, Requests: nil},
+ LogLevel: "trace",
+ Autoscaler: flowslatest.FlowCollectorHPA{Status: "DISABLED", MinReplicas: &zero, MaxReplicas: zero, Metrics: []ascv2.MetricSpec{}},
PortNaming: flowslatest.ConsolePluginPortConfig{
Enable: ptr.To(false),
PortNames: map[string]string{},
@@ -155,15 +164,9 @@ func flowCollectorIsoSpecs() {
},
},
},
- Enable: ptr.To(true),
- Mode: flowslatest.LokiModeManual,
- BatchWait: &metav1.Duration{Duration: time.Second},
- BatchSize: 100,
- Timeout: &metav1.Duration{Duration: time.Second},
- MinBackoff: &metav1.Duration{Duration: time.Second},
- MaxBackoff: &metav1.Duration{Duration: time.Second},
- MaxRetries: &zero,
- StaticLabels: map[string]string{},
+ Enable: ptr.To(true),
+ Mode: flowslatest.LokiModeManual,
+ Timeout: &metav1.Duration{Duration: time.Second},
},
Kafka: flowslatest.FlowCollectorKafka{
Address: "http://kafka",
diff --git a/controllers/flowcollector_controller_test.go b/controllers/flowcollector_controller_test.go
index 254d2d26b..d94c7c96b 100644
--- a/controllers/flowcollector_controller_test.go
+++ b/controllers/flowcollector_controller_test.go
@@ -94,24 +94,25 @@ func flowCollectorControllerSpecs() {
Namespace: operatorNamespace,
DeploymentModel: flowslatest.DeploymentModelDirect,
Processor: flowslatest.FlowCollectorFLP{
- Port: 9999,
ImagePullPolicy: "Never",
LogLevel: "error",
- Debug: flowslatest.DebugConfig{
+ Debug: flowslatest.DebugProcessorConfig{
Env: map[string]string{
"GOGC": "200",
},
+ Port: 9999,
+ ConversationHeartbeatInterval: &metav1.Duration{
+ Duration: conntrackHeartbeatInterval,
+ },
+ ConversationEndTimeout: &metav1.Duration{
+ Duration: conntrackEndTimeout,
+ },
+ ConversationTerminatingTimeout: &metav1.Duration{
+ Duration: conntrackTerminatingTimeout,
+ },
},
LogTypes: &outputRecordTypes,
- ConversationHeartbeatInterval: &metav1.Duration{
- Duration: conntrackHeartbeatInterval,
- },
- ConversationEndTimeout: &metav1.Duration{
- Duration: conntrackEndTimeout,
- },
- ConversationTerminatingTimeout: &metav1.Duration{
- Duration: conntrackTerminatingTimeout,
- },
+
Metrics: flowslatest.FLPMetrics{
IgnoreTags: []string{"flows"},
},
@@ -124,7 +125,6 @@ func flowCollectorControllerSpecs() {
},
ConsolePlugin: flowslatest.FlowCollectorConsolePlugin{
Enable: ptr.To(true),
- Port: 9001,
ImagePullPolicy: "Never",
PortNaming: flowslatest.ConsolePluginPortConfig{
Enable: ptr.To(true),
@@ -252,26 +252,27 @@ func flowCollectorControllerSpecs() {
It("Should update successfully", func() {
UpdateCR(crKey, func(fc *flowslatest.FlowCollector) {
fc.Spec.Processor = flowslatest.FlowCollectorFLP{
- Port: 7891,
ImagePullPolicy: "Never",
LogLevel: "error",
- Debug: flowslatest.DebugConfig{
+ Debug: flowslatest.DebugProcessorConfig{
Env: map[string]string{
// we'll test that env vars are sorted, to keep idempotency
"GOMAXPROCS": "33",
"GOGC": "400",
},
+ Port: 7891,
+ ConversationHeartbeatInterval: &metav1.Duration{
+ Duration: conntrackHeartbeatInterval,
+ },
+ ConversationEndTimeout: &metav1.Duration{
+ Duration: conntrackEndTimeout,
+ },
+ ConversationTerminatingTimeout: &metav1.Duration{
+ Duration: conntrackTerminatingTimeout,
+ },
},
LogTypes: &outputRecordTypes,
- ConversationHeartbeatInterval: &metav1.Duration{
- Duration: conntrackHeartbeatInterval,
- },
- ConversationEndTimeout: &metav1.Duration{
- Duration: conntrackEndTimeout,
- },
- ConversationTerminatingTimeout: &metav1.Duration{
- Duration: conntrackTerminatingTimeout,
- },
+
Metrics: flowslatest.FLPMetrics{
IgnoreTags: []string{"flows", "bytes", "packets"},
DisableAlerts: []flowslatest.FLPAlert{flowslatest.AlertLokiError},
@@ -369,7 +370,7 @@ func flowCollectorControllerSpecs() {
It("Should redeploy if the spec doesn't change but the external flowlogs-pipeline-config does", func() {
UpdateCR(crKey, func(fc *flowslatest.FlowCollector) {
- fc.Spec.Loki.MaxRetries = ptr.To(int32(7))
+ fc.Spec.Processor.Debug.MaxRetries = ptr.To(int32(7))
})
By("Expecting that the flowlogsPipeline.PodConfigurationDigest attribute has changed")
@@ -848,7 +849,7 @@ func flowCollectorControllerSpecs() {
Context("Changing namespace", func() {
It("Should update namespace successfully", func() {
UpdateCR(crKey, func(fc *flowslatest.FlowCollector) {
- fc.Spec.Processor.Port = 9999
+ fc.Spec.Processor.Debug.Port = 9999
fc.Spec.Namespace = otherNamespace
fc.Spec.Agent.IPFIX = flowslatest.FlowCollectorIPFIX{
Sampling: 200,
diff --git a/controllers/flowlogspipeline/flp_common_objects.go b/controllers/flowlogspipeline/flp_common_objects.go
index b166a4364..e9405875b 100644
--- a/controllers/flowlogspipeline/flp_common_objects.go
+++ b/controllers/flowlogspipeline/flp_common_objects.go
@@ -130,8 +130,8 @@ func (b *builder) podTemplate(hasHostPort, hostNetwork bool, annotations map[str
if hasHostPort {
ports = []corev1.ContainerPort{{
Name: constants.FLPPortName,
- HostPort: b.desired.Processor.Port,
- ContainerPort: b.desired.Processor.Port,
+ HostPort: b.desired.Processor.Debug.Port,
+ ContainerPort: b.desired.Processor.Debug.Port,
Protocol: b.portProtocol(),
}}
// This allows deploying an instance in the master node, the same technique used in the
@@ -141,7 +141,7 @@ func (b *builder) podTemplate(hasHostPort, hostNetwork bool, annotations map[str
ports = append(ports, corev1.ContainerPort{
Name: healthServiceName,
- ContainerPort: b.desired.Processor.HealthPort,
+ ContainerPort: b.desired.Processor.Debug.HealthPort,
})
ports = append(ports, corev1.ContainerPort{
@@ -149,10 +149,10 @@ func (b *builder) podTemplate(hasHostPort, hostNetwork bool, annotations map[str
ContainerPort: b.desired.Processor.Metrics.Server.Port,
})
- if b.desired.Processor.ProfilePort > 0 {
+ if b.desired.Processor.Debug.ProfilePort > 0 {
ports = append(ports, corev1.ContainerPort{
Name: profilePortName,
- ContainerPort: b.desired.Processor.ProfilePort,
+ ContainerPort: b.desired.Processor.Debug.ProfilePort,
Protocol: corev1.ProtocolTCP,
})
}
@@ -189,7 +189,7 @@ func (b *builder) podTemplate(hasHostPort, hostNetwork bool, annotations map[str
Ports: ports,
Env: envs,
}
- if helper.PtrBool(b.desired.Processor.EnableKubeProbes) {
+ if helper.PtrBool(b.desired.Processor.Debug.EnableKubeProbes) {
container.LivenessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
@@ -312,11 +312,11 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) error {
if helper.UseLoki(b.desired) {
lokiWrite := api.WriteLoki{
Labels: indexFields,
- BatchSize: int(b.desired.Loki.BatchSize),
- BatchWait: helper.UnstructuredDuration(b.desired.Loki.BatchWait),
- MaxBackoff: helper.UnstructuredDuration(b.desired.Loki.MaxBackoff),
- MaxRetries: int(helper.PtrInt32(b.desired.Loki.MaxRetries)),
- MinBackoff: helper.UnstructuredDuration(b.desired.Loki.MinBackoff),
+ BatchSize: int(b.desired.Processor.Debug.BatchSize),
+ BatchWait: helper.UnstructuredDuration(b.desired.Processor.Debug.BatchWait),
+ MaxBackoff: helper.UnstructuredDuration(b.desired.Processor.Debug.MaxBackoff),
+ MaxRetries: int(helper.PtrInt32(b.desired.Processor.Debug.MaxRetries)),
+ MinBackoff: helper.UnstructuredDuration(b.desired.Processor.Debug.MinBackoff),
StaticLabels: model.LabelSet{},
Timeout: helper.UnstructuredDuration(b.desired.Loki.Timeout),
URL: helper.LokiIngesterURL(&b.desired.Loki),
@@ -325,7 +325,7 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) error {
TenantID: helper.LokiTenantID(&b.desired.Loki),
}
- for k, v := range b.desired.Loki.StaticLabels {
+ for k, v := range b.desired.Processor.Debug.StaticLabels {
lokiWrite.StaticLabels[model.LabelName(k)] = model.LabelValue(v)
}
@@ -499,18 +499,18 @@ func (b *builder) addConnectionTracking(indexFields []string, lastStage config.P
outputRecordTypes := helper.GetRecordTypes(&b.desired.Processor)
terminatingTimeout := conntrackTerminatingTimeout
- if b.desired.Processor.ConversationTerminatingTimeout != nil {
- terminatingTimeout = b.desired.Processor.ConversationTerminatingTimeout.Duration
+ if b.desired.Processor.Debug.ConversationTerminatingTimeout != nil {
+ terminatingTimeout = b.desired.Processor.Debug.ConversationTerminatingTimeout.Duration
}
endTimeout := conntrackEndTimeout
- if b.desired.Processor.ConversationEndTimeout != nil {
- endTimeout = b.desired.Processor.ConversationEndTimeout.Duration
+ if b.desired.Processor.Debug.ConversationEndTimeout != nil {
+ endTimeout = b.desired.Processor.Debug.ConversationEndTimeout.Duration
}
heartbeatInterval := conntrackHeartbeatInterval
- if b.desired.Processor.ConversationHeartbeatInterval != nil {
- heartbeatInterval = b.desired.Processor.ConversationHeartbeatInterval.Duration
+ if b.desired.Processor.Debug.ConversationHeartbeatInterval != nil {
+ heartbeatInterval = b.desired.Processor.Debug.ConversationHeartbeatInterval.Duration
}
lastStage = lastStage.ConnTrack("extract_conntrack", api.ConnTrack{
@@ -569,7 +569,7 @@ func (b *builder) addTransformFilter(lastStage config.PipelineBuilderStage) conf
}
// Filter-out unused fields?
- if helper.PtrBool(b.desired.Processor.DropUnusedFields) {
+ if helper.PtrBool(b.desired.Processor.Debug.DropUnusedFields) {
if helper.UseIPFIX(b.desired) {
rules := filters.GetOVSGoflowUnusedRules()
transformFilterRules = append(transformFilterRules, rules...)
@@ -672,15 +672,15 @@ func (b *builder) configMap(stages []config.Stage, parameters []config.StagePara
config := map[string]interface{}{
"log-level": b.desired.Processor.LogLevel,
"health": map[string]interface{}{
- "port": b.desired.Processor.HealthPort,
+ "port": b.desired.Processor.Debug.HealthPort,
},
"pipeline": stages,
"parameters": parameters,
"metricsSettings": metricsSettings,
}
- if b.desired.Processor.ProfilePort > 0 {
+ if b.desired.Processor.Debug.ProfilePort > 0 {
config["profile"] = map[string]interface{}{
- "port": b.desired.Processor.ProfilePort,
+ "port": b.desired.Processor.Debug.ProfilePort,
}
}
diff --git a/controllers/flowlogspipeline/flp_ingest_objects.go b/controllers/flowlogspipeline/flp_ingest_objects.go
index a8c3cec8f..6506eaf53 100644
--- a/controllers/flowlogspipeline/flp_ingest_objects.go
+++ b/controllers/flowlogspipeline/flp_ingest_objects.go
@@ -54,13 +54,13 @@ func (b *ingestBuilder) buildPipelineConfig() ([]config.Stage, []config.StagePar
if helper.UseIPFIX(b.generic.desired) {
// IPFIX collector
pipeline = config.NewCollectorPipeline("ipfix", api.IngestCollector{
- Port: int(b.generic.desired.Processor.Port),
+ Port: int(b.generic.desired.Processor.Debug.Port),
HostName: "0.0.0.0",
})
} else {
// GRPC collector (eBPF agent)
pipeline = config.NewGRPCPipeline("grpc", api.IngestGRPCProto{
- Port: int(b.generic.desired.Processor.Port),
+ Port: int(b.generic.desired.Processor.Debug.Port),
})
}
diff --git a/controllers/flowlogspipeline/flp_monolith_objects.go b/controllers/flowlogspipeline/flp_monolith_objects.go
index e846d1e07..23a7806a8 100644
--- a/controllers/flowlogspipeline/flp_monolith_objects.go
+++ b/controllers/flowlogspipeline/flp_monolith_objects.go
@@ -55,13 +55,13 @@ func (b *monolithBuilder) buildPipelineConfig() ([]config.Stage, []config.StageP
if helper.UseIPFIX(b.generic.desired) {
// IPFIX collector
pipeline = config.NewCollectorPipeline("ipfix", api.IngestCollector{
- Port: int(b.generic.desired.Processor.Port),
+ Port: int(b.generic.desired.Processor.Debug.Port),
HostName: "0.0.0.0",
})
} else {
// GRPC collector (eBPF agent)
pipeline = config.NewGRPCPipeline("grpc", api.IngestGRPCProto{
- Port: int(b.generic.desired.Processor.Port),
+ Port: int(b.generic.desired.Processor.Debug.Port),
})
}
diff --git a/controllers/flowlogspipeline/flp_reconciler.go b/controllers/flowlogspipeline/flp_reconciler.go
index 1a97201af..338648074 100644
--- a/controllers/flowlogspipeline/flp_reconciler.go
+++ b/controllers/flowlogspipeline/flp_reconciler.go
@@ -44,10 +44,10 @@ func (r *FLPReconciler) CleanupNamespace(ctx context.Context) {
}
func validateDesired(desired *flpSpec) error {
- if desired.Port == 4789 ||
- desired.Port == 6081 ||
- desired.Port == 500 ||
- desired.Port == 4500 {
+ if desired.Debug.Port == 4789 ||
+ desired.Debug.Port == 6081 ||
+ desired.Debug.Port == 500 ||
+ desired.Debug.Port == 4500 {
return fmt.Errorf("flowlogs-pipeline port value is not authorized")
}
return nil
diff --git a/controllers/flowlogspipeline/flp_test.go b/controllers/flowlogspipeline/flp_test.go
index 7138c06ea..9ca8625f9 100644
--- a/controllers/flowlogspipeline/flp_test.go
+++ b/controllers/flowlogspipeline/flp_test.go
@@ -59,11 +59,9 @@ func getConfig(lokiMode ...string) flowslatest.FlowCollectorSpec {
DeploymentModel: flowslatest.DeploymentModelDirect,
Agent: flowslatest.FlowCollectorAgent{Type: flowslatest.AgentIPFIX},
Processor: flowslatest.FlowCollectorFLP{
- Port: 2055,
ImagePullPolicy: string(pullPolicy),
LogLevel: "trace",
Resources: resources,
- HealthPort: 8080,
Metrics: flowslatest.FLPMetrics{
Server: flowslatest.MetricsServerConfig{
Port: 9090,
@@ -89,14 +87,19 @@ func getConfig(lokiMode ...string) flowslatest.FlowCollectorSpec {
}},
},
LogTypes: &outputRecordTypes,
- ConversationHeartbeatInterval: &metav1.Duration{
- Duration: conntrackHeartbeatInterval,
- },
- ConversationEndTimeout: &metav1.Duration{
- Duration: conntrackEndTimeout,
- },
- ConversationTerminatingTimeout: &metav1.Duration{
- Duration: conntrackTerminatingTimeout,
+ Debug: flowslatest.DebugProcessorConfig{
+ BatchWait: &metav1.Duration{
+ Duration: 1,
+ },
+ BatchSize: 102400,
+ MinBackoff: &metav1.Duration{
+ Duration: 1,
+ },
+ MaxBackoff: &metav1.Duration{
+ Duration: 300,
+ },
+ MaxRetries: ptr.To(int32(10)),
+ StaticLabels: map[string]string{"app": "netobserv-flowcollector"},
},
},
Loki: getLoki(lokiMode...),
@@ -115,38 +118,14 @@ func getLoki(lokiMode ...string) flowslatest.FlowCollectorLoki {
Name: "lokistack",
Namespace: "ls-namespace",
},
- BatchWait: &metav1.Duration{
- Duration: 1,
- },
- BatchSize: 102400,
- MinBackoff: &metav1.Duration{
- Duration: 1,
- },
- MaxBackoff: &metav1.Duration{
- Duration: 300,
- },
- Enable: ptr.To(true),
- MaxRetries: ptr.To(int32(10)),
- StaticLabels: map[string]string{"app": "netobserv-flowcollector"},
+ Enable: ptr.To(true),
}
}
}
// defaults to MANUAL mode if no other mode was selected
return flowslatest.FlowCollectorLoki{Mode: "MANUAL", Manual: flowslatest.LokiManualParams{
IngesterURL: "http://loki:3100/"},
- BatchWait: &metav1.Duration{
- Duration: 1,
- },
- BatchSize: 102400,
- MinBackoff: &metav1.Duration{
- Duration: 1,
- },
- MaxBackoff: &metav1.Duration{
- Duration: 300,
- },
- Enable: ptr.To(true),
- MaxRetries: ptr.To(int32(10)),
- StaticLabels: map[string]string{"app": "netobserv-flowcollector"},
+ Enable: ptr.To(true),
}
}
@@ -237,7 +216,7 @@ func TestDaemonSetChanged(t *testing.T) {
first := b.daemonSet(annotate(digest))
// Check probes enabled change
- cfg.Processor.EnableKubeProbes = ptr.To(true)
+ cfg.Processor.Debug.EnableKubeProbes = ptr.To(true)
b = monoBuilder(ns, &cfg)
_, digest, err = b.configMap()
assert.NoError(err)
@@ -381,7 +360,7 @@ func TestDeploymentChanged(t *testing.T) {
first := b.deployment(annotate(digest))
// Check probes enabled change
- cfg.Processor.EnableKubeProbes = ptr.To(true)
+ cfg.Processor.Debug.EnableKubeProbes = ptr.To(true)
b = transfBuilder(ns, &cfg)
_, digest, err = b.configMap()
assert.NoError(err)
@@ -652,15 +631,15 @@ func TestConfigMapShouldDeserializeAsJSONWithLokiManual(t *testing.T) {
params := decoded.Parameters
assert.Len(params, 6)
- assert.Equal(cfg.Processor.Port, int32(params[0].Ingest.Collector.Port))
+ assert.Equal(cfg.Processor.Debug.Port, int32(params[0].Ingest.Collector.Port))
lokiCfg := params[3].Write.Loki
assert.Equal(loki.Manual.IngesterURL, lokiCfg.URL)
- assert.Equal(loki.BatchWait.Duration.String(), lokiCfg.BatchWait)
- assert.Equal(loki.MinBackoff.Duration.String(), lokiCfg.MinBackoff)
- assert.Equal(loki.MaxBackoff.Duration.String(), lokiCfg.MaxBackoff)
- assert.EqualValues(*loki.MaxRetries, lokiCfg.MaxRetries)
- assert.EqualValues(loki.BatchSize, lokiCfg.BatchSize)
+ assert.Equal(cfg.Processor.Debug.BatchWait.Duration.String(), lokiCfg.BatchWait)
+ assert.Equal(cfg.Processor.Debug.MinBackoff.Duration.String(), lokiCfg.MinBackoff)
+ assert.Equal(cfg.Processor.Debug.MaxBackoff.Duration.String(), lokiCfg.MaxBackoff)
+ assert.EqualValues(*cfg.Processor.Debug.MaxRetries, lokiCfg.MaxRetries)
+ assert.EqualValues(cfg.Processor.Debug.BatchSize, lokiCfg.BatchSize)
assert.EqualValues([]string{
"SrcK8S_Namespace",
"SrcK8S_OwnerName",
@@ -682,7 +661,6 @@ func TestConfigMapShouldDeserializeAsJSONWithLokiStack(t *testing.T) {
ns := "namespace"
cfg := getConfig("LOKISTACK")
- loki := cfg.Loki
b := monoBuilder(ns, &cfg)
cm, digest, err := b.configMap()
assert.NoError(err)
@@ -701,7 +679,7 @@ func TestConfigMapShouldDeserializeAsJSONWithLokiStack(t *testing.T) {
params := decoded.Parameters
assert.Len(params, 6)
- assert.Equal(cfg.Processor.Port, int32(params[0].Ingest.Collector.Port))
+ assert.Equal(cfg.Processor.Debug.Port, int32(params[0].Ingest.Collector.Port))
lokiCfg := params[3].Write.Loki
assert.Equal("https://lokistack-gateway-http.ls-namespace.svc:8080/api/logs/v1/network/", lokiCfg.URL)
@@ -712,11 +690,11 @@ func TestConfigMapShouldDeserializeAsJSONWithLokiStack(t *testing.T) {
assert.Equal("/var/loki-certs-ca/service-ca.crt", lokiCfg.ClientConfig.TLSConfig.CAFile)
assert.Equal("", lokiCfg.ClientConfig.TLSConfig.CertFile)
assert.Equal("", lokiCfg.ClientConfig.TLSConfig.KeyFile)
- assert.Equal(loki.BatchWait.Duration.String(), lokiCfg.BatchWait)
- assert.Equal(loki.MinBackoff.Duration.String(), lokiCfg.MinBackoff)
- assert.Equal(loki.MaxBackoff.Duration.String(), lokiCfg.MaxBackoff)
- assert.EqualValues(*loki.MaxRetries, lokiCfg.MaxRetries)
- assert.EqualValues(loki.BatchSize, lokiCfg.BatchSize)
+ assert.Equal(cfg.Processor.Debug.BatchWait.Duration.String(), lokiCfg.BatchWait)
+ assert.Equal(cfg.Processor.Debug.MinBackoff.Duration.String(), lokiCfg.MinBackoff)
+ assert.Equal(cfg.Processor.Debug.MaxBackoff.Duration.String(), lokiCfg.MaxBackoff)
+ assert.EqualValues(*cfg.Processor.Debug.MaxRetries, lokiCfg.MaxRetries)
+ assert.EqualValues(cfg.Processor.Debug.BatchSize, lokiCfg.BatchSize)
assert.EqualValues([]string{"SrcK8S_Namespace", "SrcK8S_OwnerName", "SrcK8S_Type", "DstK8S_Namespace", "DstK8S_OwnerName", "DstK8S_Type", "FlowDirection", "Duplicate", "_RecordType"}, lokiCfg.Labels)
assert.Equal(`{app="netobserv-flowcollector"}`, fmt.Sprintf("%v", lokiCfg.StaticLabels))
@@ -863,7 +841,7 @@ func TestPipelineConfigDropUnused(t *testing.T) {
ns := "namespace"
cfg := getConfig()
cfg.Processor.LogLevel = "info"
- cfg.Processor.DropUnusedFields = ptr.To(true)
+ cfg.Processor.Debug.DropUnusedFields = ptr.To(true)
b := monoBuilder(ns, &cfg)
stages, parameters, err := b.buildPipelineConfig()
assert.NoError(err)
diff --git a/controllers/ovs/flowsconfig_cno_reconciler.go b/controllers/ovs/flowsconfig_cno_reconciler.go
index 1916372d9..8158ef009 100644
--- a/controllers/ovs/flowsconfig_cno_reconciler.go
+++ b/controllers/ovs/flowsconfig_cno_reconciler.go
@@ -106,7 +106,7 @@ func (c *FlowsConfigCNOController) desired(
return &flowsConfig{
FlowCollectorIPFIX: *corrected,
- NodePort: coll.Spec.Processor.Port,
+ NodePort: coll.Spec.Processor.Debug.Port,
}
}
diff --git a/controllers/ovs/flowsconfig_ovnk_reconciler.go b/controllers/ovs/flowsconfig_ovnk_reconciler.go
index 926aed71c..8a9994f8d 100644
--- a/controllers/ovs/flowsconfig_ovnk_reconciler.go
+++ b/controllers/ovs/flowsconfig_ovnk_reconciler.go
@@ -98,7 +98,7 @@ func (c *FlowsConfigOVNKController) desiredEnv(ctx context.Context, coll *flowsl
return envs, nil
}
- envs["OVN_IPFIX_TARGETS"] = fmt.Sprintf(":%d", coll.Spec.Processor.Port)
+ envs["OVN_IPFIX_TARGETS"] = fmt.Sprintf(":%d", coll.Spec.Processor.Debug.Port)
return envs, nil
}
diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md
index e5a6a0c9d..4f1019c28 100644
--- a/docs/FlowCollector.md
+++ b/docs/FlowCollector.md
@@ -9162,6 +9162,13 @@ ResourceClaim references one entry in PodSpec.ResourceClaims.
`autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment.
Name | +Type | +Description | +Required | +
---|---|---|---|
args | +[]string | +
+ `args` allows passing custom arguments to underlying components. Useful for overriding some parameters, such as an url or a configuration path, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios. + |
+ false | +
env | +map[string]string | +
+ `env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios. + |
+ false | +
port | +integer | +
+ `port` is the plugin service port. Do not use 9002, which is reserved for metrics. + + Format: int32 + Default: 9001 + Minimum: 1 + Maximum: 65535 + |
+ false | +
register | +boolean | +
+ `register` allows, when set to `true`, to automatically register the provided console plugin with the OpenShift Console operator. When set to `false`, you can still register it manually by editing console.operator.openshift.io/cluster with the following command: `oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]'` + + Default: true + |
+ false | +