From ec8a9807b98684b774956b60d9f3244f6f62ffa9 Mon Sep 17 00:00:00 2001 From: "tomas.zdara" Date: Wed, 7 Jun 2023 14:49:57 +0200 Subject: [PATCH 01/37] DSET-3998 - export Logs resource info based on export_resource_info_on_event configuration --- exporter/datasetexporter/README.md | 2 + exporter/datasetexporter/config.go | 22 ++++- exporter/datasetexporter/config_test.go | 18 +++- exporter/datasetexporter/datasetexporter.go | 2 + exporter/datasetexporter/factory.go | 1 + exporter/datasetexporter/factory_test.go | 7 +- exporter/datasetexporter/logs_exporter.go | 13 ++- .../datasetexporter/logs_exporter_test.go | 91 ++++++++++++------- exporter/datasetexporter/testdata/config.yaml | 2 + 9 files changed, 121 insertions(+), 37 deletions(-) diff --git a/exporter/datasetexporter/README.md b/exporter/datasetexporter/README.md index 3e2548869915..ab21849b7e0d 100644 --- a/exporter/datasetexporter/README.md +++ b/exporter/datasetexporter/README.md @@ -33,6 +33,8 @@ If you do not want to specify `api_key` in the file, you can use the [builtin fu - `traces`: - `aggregate` (default = false): Count the number of spans and errors belonging to a trace. - `max_wait` (default = 5s): The maximum waiting for all spans from single trace to arrive; ignored if `aggregate` is false. +- `logs`: + - `export_resource_info_on_event` (default = false): Include resource info to DataSet Event while exporting Logs. 
- `retry_on_failure`: See [retry_on_failure](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) - `sending_queue`: See [sending_queue](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) - `timeout`: See [timeout](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) diff --git a/exporter/datasetexporter/config.go b/exporter/datasetexporter/config.go index 8eddae82a873..69a2d532b7e2 100644 --- a/exporter/datasetexporter/config.go +++ b/exporter/datasetexporter/config.go @@ -32,6 +32,22 @@ func newDefaultTracesSettings() TracesSettings { } } +const logsExportResourceInfoDefault = false + +type LogsSettings struct { + // ExportResourceInfo is optional flag to signal that the resource info is being exported to DataSet while exporting Logs. + // This is especially useful when reducing DataSet billable log volume. + // Default value: false. + ExportResourceInfo bool `mapstructure:"export_resource_info_on_event"` +} + +// newDefaultLogsSettings returns the default settings for LogsSettings. 
+func newDefaultLogsSettings() LogsSettings { + return LogsSettings{ + ExportResourceInfo: logsExportResourceInfoDefault, + } +} + const bufferMaxLifetime = 5 * time.Second const bufferRetryInitialInterval = 5 * time.Second const bufferRetryMaxInterval = 30 * time.Second @@ -61,6 +77,7 @@ type Config struct { APIKey configopaque.String `mapstructure:"api_key"` BufferSettings `mapstructure:"buffer"` TracesSettings `mapstructure:"traces"` + LogsSettings `mapstructure:"logs"` exporterhelper.RetrySettings `mapstructure:"retry_on_failure"` exporterhelper.QueueSettings `mapstructure:"sending_queue"` exporterhelper.TimeoutSettings `mapstructure:"timeout"` @@ -96,7 +113,8 @@ func (c *Config) String() string { s += fmt.Sprintf("%s: %+v; ", "TracesSettings", c.TracesSettings) s += fmt.Sprintf("%s: %+v; ", "RetrySettings", c.RetrySettings) s += fmt.Sprintf("%s: %+v; ", "QueueSettings", c.QueueSettings) - s += fmt.Sprintf("%s: %+v", "TimeoutSettings", c.TimeoutSettings) + s += fmt.Sprintf("%s: %+v; ", "TimeoutSettings", c.TimeoutSettings) + s += fmt.Sprintf("%s: %+v", "LogsSettings", c.LogsSettings) return s } @@ -123,6 +141,7 @@ func (c *Config) convert() (*ExporterConfig, error) { }, }, tracesSettings: c.TracesSettings, + logsSettings: c.LogsSettings, }, nil } @@ -130,4 +149,5 @@ func (c *Config) convert() (*ExporterConfig, error) { type ExporterConfig struct { datasetConfig *datasetConfig.DataSetConfig tracesSettings TracesSettings + logsSettings LogsSettings } diff --git a/exporter/datasetexporter/config_test.go b/exporter/datasetexporter/config_test.go index 66c38e6d09d4..8bd938926da0 100644 --- a/exporter/datasetexporter/config_test.go +++ b/exporter/datasetexporter/config_test.go @@ -43,6 +43,7 @@ func TestConfigUseDefaults(t *testing.T) { assert.Equal(t, "secret", string(config.APIKey)) assert.Equal(t, bufferMaxLifetime, config.MaxLifetime) assert.Equal(t, tracesMaxWait, config.TracesSettings.MaxWait) + assert.Equal(t, logsExportResourceInfoDefault, 
config.LogsSettings.ExportResourceInfo) } func TestConfigValidate(t *testing.T) { @@ -114,7 +115,22 @@ func TestConfigString(t *testing.T) { } assert.Equal(t, - "DatasetURL: https://example.com; BufferSettings: {MaxLifetime:123ns GroupBy:[field1 field2] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s}; TracesSettings: {Aggregate:true MaxWait:45s}; RetrySettings: {Enabled:true InitialInterval:5s RandomizationFactor:0.5 Multiplier:1.5 MaxInterval:30s MaxElapsedTime:5m0s}; QueueSettings: {Enabled:true NumConsumers:10 QueueSize:1000 StorageID:}; TimeoutSettings: {Timeout:5s}", + "DatasetURL: https://example.com; BufferSettings: {MaxLifetime:123ns GroupBy:[field1 field2] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s}; TracesSettings: {Aggregate:true MaxWait:45s}; RetrySettings: {Enabled:true InitialInterval:5s RandomizationFactor:0.5 Multiplier:1.5 MaxInterval:30s MaxElapsedTime:5m0s}; QueueSettings: {Enabled:true NumConsumers:10 QueueSize:1000 StorageID:}; TimeoutSettings: {Timeout:5s}; LogsSettings: {ExportResourceInfo:false}", config.String(), ) } + +func TestConfigUseProvidedExportResourceInfoValue(t *testing.T) { + f := NewFactory() + config := f.CreateDefaultConfig().(*Config) + configMap := confmap.NewFromStringMap(map[string]interface{}{ + "dataset_url": "https://example.com", + "api_key": "secret", + "logs": map[string]any{ + "export_resource_info_on_event": true, + }, + }) + err := config.Unmarshal(configMap) + assert.Nil(t, err) + assert.Equal(t, true, config.LogsSettings.ExportResourceInfo) +} diff --git a/exporter/datasetexporter/datasetexporter.go b/exporter/datasetexporter/datasetexporter.go index 0c73d16dd108..6bbbc88b1521 100644 --- a/exporter/datasetexporter/datasetexporter.go +++ b/exporter/datasetexporter/datasetexporter.go @@ -24,6 +24,7 @@ type DatasetExporter struct { logger *zap.Logger session string spanTracker *spanTracker + exporterCfg *ExporterConfig } func newDatasetExporter(entity string, config 
*Config, logger *zap.Logger) (*DatasetExporter, error) { @@ -60,6 +61,7 @@ func newDatasetExporter(entity string, config *Config, logger *zap.Logger) (*Dat session: uuid.New().String(), logger: logger, spanTracker: tracker, + exporterCfg: exporterCfg, }, nil } diff --git a/exporter/datasetexporter/factory.go b/exporter/datasetexporter/factory.go index 24e067d9b2d9..b76b3bf4b519 100644 --- a/exporter/datasetexporter/factory.go +++ b/exporter/datasetexporter/factory.go @@ -27,6 +27,7 @@ func createDefaultConfig() component.Config { return &Config{ BufferSettings: newDefaultBufferSettings(), TracesSettings: newDefaultTracesSettings(), + LogsSettings: newDefaultLogsSettings(), RetrySettings: exporterhelper.NewDefaultRetrySettings(), QueueSettings: exporterhelper.NewDefaultQueueSettings(), TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), diff --git a/exporter/datasetexporter/factory_test.go b/exporter/datasetexporter/factory_test.go index 77a4d3a288c8..6d2df780a697 100644 --- a/exporter/datasetexporter/factory_test.go +++ b/exporter/datasetexporter/factory_test.go @@ -49,6 +49,7 @@ func TestLoadConfig(t *testing.T) { APIKey: "key-minimal", BufferSettings: newDefaultBufferSettings(), TracesSettings: newDefaultTracesSettings(), + LogsSettings: newDefaultLogsSettings(), RetrySettings: exporterhelper.NewDefaultRetrySettings(), QueueSettings: exporterhelper.NewDefaultQueueSettings(), TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), @@ -67,6 +68,7 @@ func TestLoadConfig(t *testing.T) { RetryMaxElapsedTime: bufferRetryMaxElapsedTime, }, TracesSettings: newDefaultTracesSettings(), + LogsSettings: newDefaultLogsSettings(), RetrySettings: exporterhelper.NewDefaultRetrySettings(), QueueSettings: exporterhelper.NewDefaultQueueSettings(), TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), @@ -87,6 +89,9 @@ func TestLoadConfig(t *testing.T) { TracesSettings: TracesSettings{ MaxWait: 3 * time.Second, }, + LogsSettings: LogsSettings{ + 
ExportResourceInfo: true, + }, RetrySettings: exporterhelper.RetrySettings{ Enabled: true, InitialInterval: 11 * time.Nanosecond, @@ -133,7 +138,7 @@ func createExporterTests() []CreateTest { { name: "broken", config: &Config{}, - expectedError: fmt.Errorf("cannot get DataSetExpoter: cannot convert config: DatasetURL: ; BufferSettings: {MaxLifetime:0s GroupBy:[] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s}; TracesSettings: {Aggregate:false MaxWait:0s}; RetrySettings: {Enabled:false InitialInterval:0s RandomizationFactor:0 Multiplier:0 MaxInterval:0s MaxElapsedTime:0s}; QueueSettings: {Enabled:false NumConsumers:0 QueueSize:0 StorageID:}; TimeoutSettings: {Timeout:0s}; config is not valid: api_key is required"), + expectedError: fmt.Errorf("cannot get DataSetExpoter: cannot convert config: DatasetURL: ; BufferSettings: {MaxLifetime:0s GroupBy:[] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s}; TracesSettings: {Aggregate:false MaxWait:0s}; RetrySettings: {Enabled:false InitialInterval:0s RandomizationFactor:0 Multiplier:0 MaxInterval:0s MaxElapsedTime:0s}; QueueSettings: {Enabled:false NumConsumers:0 QueueSize:0 StorageID:}; TimeoutSettings: {Timeout:0s}; LogsSettings: {ExportResourceInfo:false}; config is not valid: api_key is required"), }, { name: "valid", diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index dc96ff6f75e1..aa8879ddcc10 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -63,7 +63,12 @@ func buildBody(attrs map[string]interface{}, value pcommon.Value) string { return message } -func buildEventFromLog(log plog.LogRecord, resource pcommon.Resource, scope pcommon.InstrumentationScope) *add_events.EventBundle { +func buildEventFromLog( + log plog.LogRecord, + resource pcommon.Resource, + scope pcommon.InstrumentationScope, + settings LogsSettings, +) *add_events.EventBundle { attrs := 
make(map[string]interface{}) event := add_events.Event{} @@ -104,7 +109,9 @@ func buildEventFromLog(log plog.LogRecord, resource pcommon.Resource, scope pcom attrs["flags"] = log.Flags() attrs["flag.is_sampled"] = log.Flags().IsSampled() - updateWithPrefixedValues(attrs, "resource.attributes.", ".", resource.Attributes().AsRaw(), 0) + if settings.ExportResourceInfo == true { + updateWithPrefixedValues(attrs, "resource.attributes.", ".", resource.Attributes().AsRaw(), 0) + } attrs["scope.name"] = scope.Name() updateWithPrefixedValues(attrs, "scope.attributes.", ".", scope.Attributes().AsRaw(), 0) @@ -130,7 +137,7 @@ func (e *DatasetExporter) consumeLogs(ctx context.Context, ld plog.Logs) error { logRecords := scopeLogs.At(j).LogRecords() for k := 0; k < logRecords.Len(); k++ { logRecord := logRecords.At(k) - events = append(events, buildEventFromLog(logRecord, resource, scope)) + events = append(events, buildEventFromLog(logRecord, resource, scope, e.exporterCfg.logsSettings)) } } } diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index e9530839f74e..a61f151dd13c 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -158,21 +158,20 @@ var testLEventRaw = &add_events.Event{ Sev: 9, Ts: "1581452773000000789", Attrs: map[string]interface{}{ - "attributes.app": "server", - "attributes.instance_num": int64(1), - "body.str": "This is a log message", - "body.type": "Str", - "dropped_attributes_count": uint32(1), - "flag.is_sampled": false, - "flags": plog.LogRecordFlags(0), - "message": "OtelExporter - Log - This is a log message", - "resource.attributes.resource-attr": "resource-attr-val-1", - "scope.name": "", - "severity.number": plog.SeverityNumberInfo, - "severity.text": "Info", - "span_id": "0102040800000000", - "timestamp": "2020-02-11 20:26:13.000000789 +0000 UTC", - "trace_id": "08040201000000000000000000000000", + "attributes.app": "server", + 
"attributes.instance_num": int64(1), + "body.str": "This is a log message", + "body.type": "Str", + "dropped_attributes_count": uint32(1), + "flag.is_sampled": false, + "flags": plog.LogRecordFlags(0), + "message": "OtelExporter - Log - This is a log message", + "scope.name": "", + "severity.number": plog.SeverityNumberInfo, + "severity.text": "Info", + "span_id": "0102040800000000", + "timestamp": "2020-02-11 20:26:13.000000789 +0000 UTC", + "trace_id": "08040201000000000000000000000000", }, } @@ -182,22 +181,21 @@ var testLEventReq = &add_events.Event{ Sev: testLEventRaw.Sev, Ts: testLEventRaw.Ts, Attrs: map[string]interface{}{ - "attributes.app": "server", - "attributes.instance_num": float64(1), - "body.str": "This is a log message", - "body.type": "Str", - "dropped_attributes_count": float64(1), - "flag.is_sampled": false, - "flags": float64(plog.LogRecordFlags(0)), - "message": "OtelExporter - Log - This is a log message", - "resource.attributes.resource-attr": "resource-attr-val-1", - "scope.name": "", - "severity.number": float64(plog.SeverityNumberInfo), - "severity.text": "Info", - "span_id": "0102040800000000", - "timestamp": "2020-02-11 20:26:13.000000789 +0000 UTC", - "trace_id": "08040201000000000000000000000000", - "bundle_key": "d41d8cd98f00b204e9800998ecf8427e", + "attributes.app": "server", + "attributes.instance_num": float64(1), + "body.str": "This is a log message", + "body.type": "Str", + "dropped_attributes_count": float64(1), + "flag.is_sampled": false, + "flags": float64(plog.LogRecordFlags(0)), + "message": "OtelExporter - Log - This is a log message", + "scope.name": "", + "severity.number": float64(plog.SeverityNumberInfo), + "severity.text": "Info", + "span_id": "0102040800000000", + "timestamp": "2020-02-11 20:26:13.000000789 +0000 UTC", + "trace_id": "08040201000000000000000000000000", + "bundle_key": "d41d8cd98f00b204e9800998ecf8427e", }, } @@ -224,6 +222,37 @@ func TestBuildEventFromLog(t *testing.T) { ld, 
lr.ResourceLogs().At(0).Resource(), lr.ResourceLogs().At(0).ScopeLogs().At(0).Scope(), + newDefaultLogsSettings(), + ) + + assert.Equal(t, expected, was) +} + +func TestBuildEventFromLogExportResources(t *testing.T) { + lr := testdata.GenerateLogsOneLogRecord() + ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + + defaultAttrs := testLEventRaw.Attrs + defaultAttrs["resource.attributes.resource-attr"] = "resource-attr-val-1" + + expected := &add_events.EventBundle{ + Event: &add_events.Event{ + Thread: testLEventRaw.Thread, + Log: testLEventRaw.Log, + Sev: testLEventRaw.Sev, + Ts: testLEventRaw.Ts, + Attrs: defaultAttrs, + }, + Thread: testLThread, + Log: testLLog, + } + was := buildEventFromLog( + ld, + lr.ResourceLogs().At(0).Resource(), + lr.ResourceLogs().At(0).ScopeLogs().At(0).Scope(), + LogsSettings{ + ExportResourceInfo: true, + }, ) assert.Equal(t, expected, was) diff --git a/exporter/datasetexporter/testdata/config.yaml b/exporter/datasetexporter/testdata/config.yaml index fa6b2c5c2697..61360b810919 100644 --- a/exporter/datasetexporter/testdata/config.yaml +++ b/exporter/datasetexporter/testdata/config.yaml @@ -25,6 +25,8 @@ dataset/full: retry_max_elapsed_time: 23s traces: max_wait: 3s + logs: + export_resource_info_on_event: true retry_on_failure: enabled: true initial_interval: 11 From b7b7c0e597f3597a1b68ad48d858701bd487b235 Mon Sep 17 00:00:00 2001 From: "tomas.zdara" Date: Wed, 7 Jun 2023 16:48:58 +0200 Subject: [PATCH 02/37] DSET-3998 - simplify --- exporter/datasetexporter/logs_exporter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index aa8879ddcc10..233e8c582ed4 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -109,7 +109,7 @@ func buildEventFromLog( attrs["flags"] = log.Flags() attrs["flag.is_sampled"] = log.Flags().IsSampled() - if settings.ExportResourceInfo == 
true { + if settings.ExportResourceInfo { updateWithPrefixedValues(attrs, "resource.attributes.", ".", resource.Attributes().AsRaw(), 0) } attrs["scope.name"] = scope.Name() From 322c5b586673a845f25c5c69687675f654872f4e Mon Sep 17 00:00:00 2001 From: "tomas.zdara" Date: Thu, 8 Jun 2023 10:09:51 +0200 Subject: [PATCH 03/37] DSET-3998 - improve docs --- exporter/datasetexporter/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exporter/datasetexporter/README.md b/exporter/datasetexporter/README.md index ab21849b7e0d..ceef4497e2ea 100644 --- a/exporter/datasetexporter/README.md +++ b/exporter/datasetexporter/README.md @@ -34,7 +34,7 @@ If you do not want to specify `api_key` in the file, you can use the [builtin fu - `aggregate` (default = false): Count the number of spans and errors belonging to a trace. - `max_wait` (default = 5s): The maximum waiting for all spans from single trace to arrive; ignored if `aggregate` is false. - `logs`: - - `export_resource_info_on_event` (default = false): Include resource info to DataSet Event while exporting Logs. + - `export_resource_info_on_event` (default = false): Include resource info to DataSet Event while exporting Logs. This is especially useful when reducing DataSet billable log volume. - `retry_on_failure`: See [retry_on_failure](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) - `sending_queue`: See [sending_queue](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) - `timeout`: See [timeout](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) From 1e4eaa66af6ccac2e30052fe74ee7855611dd804 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 8 Jun 2023 17:11:40 +0200 Subject: [PATCH 04/37] Fix log exporter to set AddEvents Event timestamp (ts) field to event observed timestamp in case LogRecord doesn't contain timestamp. 
Even though ObservedTimestamp should always be present, we fall back to current time in case it's not. In addition to that, remove duplicate and redundant "timestamp" attribute from the AddEvents event. --- exporter/datasetexporter/logs_exporter.go | 25 +++++++- .../datasetexporter/logs_exporter_test.go | 62 ++++++++++++++++++- 2 files changed, 82 insertions(+), 5 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index dc96ff6f75e1..389416af6c1b 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -17,6 +17,8 @@ import ( "go.opentelemetry.io/collector/pdata/plog" ) +var now = time.Now + func createLogsExporter(ctx context.Context, set exporter.CreateSettings, config component.Config) (exporter.Logs, error) { cfg := castConfig(config) e, err := newDatasetExporter("logs", cfg, set.Logger) @@ -67,13 +69,13 @@ func buildEventFromLog(log plog.LogRecord, resource pcommon.Resource, scope pcom attrs := make(map[string]interface{}) event := add_events.Event{} + observedTs := log.ObservedTimestamp().AsTime() if sevNum := log.SeverityNumber(); sevNum > 0 { attrs["severity.number"] = sevNum event.Sev = int(sevNum) } if timestamp := log.Timestamp().AsTime(); !timestamp.Equal(time.Unix(0, 0)) { - attrs["timestamp"] = timestamp.String() event.Ts = strconv.FormatInt(timestamp.UnixNano(), 10) } @@ -86,8 +88,8 @@ func buildEventFromLog(log plog.LogRecord, resource pcommon.Resource, scope pcom if dropped := log.DroppedAttributesCount(); dropped > 0 { attrs["dropped_attributes_count"] = dropped } - if observed := log.ObservedTimestamp().AsTime(); !observed.Equal(time.Unix(0, 0)) { - attrs["observed.timestamp"] = observed.String() + if !observedTs.Equal(time.Unix(0, 0)) { + attrs["observed.timestamp"] = observedTs.String() } if sevText := log.SeverityText(); sevText != "" { attrs["severity.text"] = sevText @@ -100,6 +102,23 @@ func buildEventFromLog(log plog.LogRecord, 
resource pcommon.Resource, scope pcom attrs["trace_id"] = trace } + // Event needs to always have timestamp set otherwise it will get set to unix epoch start time + if event.Ts == "" { + fmt.Printf("laaa") + // ObservedTimestamp should always be set, but in case if it's not, we fall back to + // current time + // TODO: We should probably also do a rate limited log message here since this + // could indicate an issue with the current setup in case most events don't contain + // a timestamp. + if !observedTs.Equal(time.Unix(0, 0)) { + fmt.Printf("observed") + event.Ts = strconv.FormatInt(observedTs.UnixNano(), 10) + } else { + fmt.Printf("now") + event.Ts = strconv.FormatInt(now().UnixNano(), 10) + } + } + updateWithPrefixedValues(attrs, "attributes.", ".", log.Attributes().AsRaw(), 0) attrs["flags"] = log.Flags() attrs["flag.is_sampled"] = log.Flags().IsSampled() diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index e9530839f74e..1131694db974 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -11,6 +11,7 @@ import ( "io" "net/http" "net/http/httptest" + "strconv" "sync/atomic" "testing" "time" @@ -171,7 +172,6 @@ var testLEventRaw = &add_events.Event{ "severity.number": plog.SeverityNumberInfo, "severity.text": "Info", "span_id": "0102040800000000", - "timestamp": "2020-02-11 20:26:13.000000789 +0000 UTC", "trace_id": "08040201000000000000000000000000", }, } @@ -195,7 +195,6 @@ var testLEventReq = &add_events.Event{ "severity.number": float64(plog.SeverityNumberInfo), "severity.text": "Info", "span_id": "0102040800000000", - "timestamp": "2020-02-11 20:26:13.000000789 +0000 UTC", "trace_id": "08040201000000000000000000000000", "bundle_key": "d41d8cd98f00b204e9800998ecf8427e", }, @@ -229,6 +228,65 @@ func TestBuildEventFromLog(t *testing.T) { assert.Equal(t, expected, was) } +func 
TestBuildEventFromLogEventWithoutTimestampWithObservedTimestampUseObservedTimestamp(t *testing.T) { + // When LogRecord doesn't have timestamp set, but it has ObservedTimestamp set, + // ObservedTimestamp should be used + lr := testdata.GenerateLogsOneLogRecord() + ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + + ld.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, 0))) + ld.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Unix(1686235113, 0))) + + testLEventRaw.Ts = "1686235113000000000" + testLEventRaw.Attrs["observed.timestamp"] = "2023-06-08 14:38:33 +0000 UTC" + delete(testLEventRaw.Attrs, "timestamp") + + expected := &add_events.EventBundle{ + Event: testLEventRaw, + Thread: testLThread, + Log: testLLog, + } + was := buildEventFromLog( + ld, + lr.ResourceLogs().At(0).Resource(), + lr.ResourceLogs().At(0).ScopeLogs().At(0).Scope(), + ) + + assert.Equal(t, expected, was) +} + +func TestBuildEventFromLogEventWithoutTimestampWithOutObservedTimestampUseCurrentTimestamp(t *testing.T) { + // When LogRecord doesn't have timestampa and ObservedTimestamp set, current timestamp + // should be used + // We mock current time to ensure stability across runs + + now = func() time.Time { return time.Unix(123456789, 0) } + currentTime := now() + + lr := testdata.GenerateLogsOneLogRecord() + ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + + ld.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, 0))) + ld.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, 0))) + + testLEventRaw.Ts = strconv.FormatInt(currentTime.UnixNano(), 10) + delete(testLEventRaw.Attrs, "timestamp") + delete(testLEventRaw.Attrs, "observed.timestamp") + + expected := &add_events.EventBundle{ + Event: testLEventRaw, + Thread: testLThread, + Log: testLLog, + } + was := buildEventFromLog( + ld, + lr.ResourceLogs().At(0).Resource(), + lr.ResourceLogs().At(0).ScopeLogs().At(0).Scope(), + ) + + assert.Equal(t, expected, was) +} + func extract(req 
*http.Request) (add_events.AddEventsRequest, error) { data, _ := io.ReadAll(req.Body) b := bytes.NewBuffer(data) From 21e904108dd55d325d0e91e9b0f1065be2ecccfe Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 9 Jun 2023 12:10:00 +0200 Subject: [PATCH 05/37] Add additional assertions. --- exporter/datasetexporter/logs_exporter_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index 9362c1e75cf2..545f40dc8210 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -293,6 +293,8 @@ func TestBuildEventFromLogEventWithoutTimestampWithOutObservedTimestampUseCurren now = func() time.Time { return time.Unix(123456789, 0) } currentTime := now() + assert.Equal(t, currentTime, time.Unix(123456789, 0)) + assert.Equal(t, strconv.FormatInt(currentTime.UnixNano(), 10), "123456789000000000") lr := testdata.GenerateLogsOneLogRecord() ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) From e03e0e2959c0193606b2116bbfc931d6007ec919 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 9 Jun 2023 12:10:51 +0200 Subject: [PATCH 06/37] Remove dummy debug logs. 
--- exporter/datasetexporter/logs_exporter.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index 2f6c8fe26754..2aed24b9d027 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -109,17 +109,14 @@ func buildEventFromLog( // Event needs to always have timestamp set otherwise it will get set to unix epoch start time if event.Ts == "" { - fmt.Printf("laaa") // ObservedTimestamp should always be set, but in case if it's not, we fall back to // current time // TODO: We should probably also do a rate limited log message here since this // could indicate an issue with the current setup in case most events don't contain // a timestamp. if !observedTs.Equal(time.Unix(0, 0)) { - fmt.Printf("observed") event.Ts = strconv.FormatInt(observedTs.UnixNano(), 10) } else { - fmt.Printf("now") event.Ts = strconv.FormatInt(now().UnixNano(), 10) } } From a3d5e0d9fdf0a54e637d3baffdc939206c02e3f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20=C5=BD=C4=8F=C3=A1ra?= Date: Tue, 13 Jun 2023 09:13:40 +0200 Subject: [PATCH 07/37] Create export-logs-resource-info-based-configuration --- ...ort-logs-resource-info-based-configuration | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 .chloggen/export-logs-resource-info-based-configuration diff --git a/.chloggen/export-logs-resource-info-based-configuration b/.chloggen/export-logs-resource-info-based-configuration new file mode 100644 index 000000000000..c68e8af48532 --- /dev/null +++ b/.chloggen/export-logs-resource-info-based-configuration @@ -0,0 +1,20 @@ +# Use this changelog template to create an entry for release notes. +# If your change doesn't affect end users, such as a test fix or a tooling change, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. 
 + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'enhancement', 'bug_fix' + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: datasetexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Allow including Logs resource info in export to DataSet based on new export_resource_info_on_event configuration. Fix timestamp handling." + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [20660] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: From 65e9b0c0c5e73b29f88f4f9694a29f08c6c974c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20=C5=BD=C4=8F=C3=A1ra?= Date: Tue, 13 Jun 2023 17:47:05 +0200 Subject: [PATCH 08/37] address PR notes - fix changelog gen --- .chloggen/export-logs-resource-info-based-configuration | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.chloggen/export-logs-resource-info-based-configuration b/.chloggen/export-logs-resource-info-based-configuration index c68e8af48532..8e4872151206 100644 --- a/.chloggen/export-logs-resource-info-based-configuration +++ b/.chloggen/export-logs-resource-info-based-configuration @@ -3,7 +3,7 @@ # you should instead start your pull request title with [chore] or use the "Skip Changelog" label. # One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: 'enhancement', 'bug_fix' +change_type: 'enhancement' # The name of the component, or a single word describing the area of concern, (e.g. 
filelogreceiver) component: datasetexporter From d6cc185cedf9a9d582b55b7b312b1a405d577e5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20=C5=BD=C4=8F=C3=A1ra?= Date: Tue, 13 Jun 2023 17:48:19 +0200 Subject: [PATCH 09/37] fix docs typo --- exporter/datasetexporter/logs_exporter_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index 545f40dc8210..e61d42932b3a 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -287,7 +287,7 @@ func TestBuildEventFromLogEventWithoutTimestampWithObservedTimestampUseObservedT } func TestBuildEventFromLogEventWithoutTimestampWithOutObservedTimestampUseCurrentTimestamp(t *testing.T) { - // When LogRecord doesn't have timestampa and ObservedTimestamp set, current timestamp + // When LogRecord doesn't have timestamp and ObservedTimestamp set, current timestamp // should be used // We mock current time to ensure stability across runs From 9f987ac977e2f7e0654199b86bcb87f82ec0c700 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20=C5=BD=C4=8F=C3=A1ra?= Date: Tue, 13 Jun 2023 17:54:22 +0200 Subject: [PATCH 10/37] fix changelog file suffix --- ...uration => export-logs-resource-info-based-configuration.yaml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .chloggen/{export-logs-resource-info-based-configuration => export-logs-resource-info-based-configuration.yaml} (100%) diff --git a/.chloggen/export-logs-resource-info-based-configuration b/.chloggen/export-logs-resource-info-based-configuration.yaml similarity index 100% rename from .chloggen/export-logs-resource-info-based-configuration rename to .chloggen/export-logs-resource-info-based-configuration.yaml From 52e8651323fa92399beb0eee9eb44c5c23a9b24b Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 20 Jun 2023 08:54:33 +0200 Subject: [PATCH 11/37] Remove "OtelExporter - 
Log -" suffix from the event "message" field. This prefix is neither desired nor wanted. --- exporter/datasetexporter/logs_exporter.go | 5 +---- exporter/datasetexporter/logs_exporter_test.go | 4 ++-- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index 2aed24b9d027..4f984760d309 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -85,10 +85,7 @@ func buildEventFromLog( } if body := log.Body().AsString(); body != "" { - attrs["message"] = fmt.Sprintf( - "OtelExporter - Log - %s", - buildBody(attrs, log.Body()), - ) + attrs["message"] = buildBody(attrs, log.Body()) } if dropped := log.DroppedAttributesCount(); dropped > 0 { attrs["dropped_attributes_count"] = dropped diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index e61d42932b3a..5c12495120b5 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -166,7 +166,7 @@ var testLEventRaw = &add_events.Event{ "dropped_attributes_count": uint32(1), "flag.is_sampled": false, "flags": plog.LogRecordFlags(0), - "message": "OtelExporter - Log - This is a log message", + "message": "This is a log message", "scope.name": "", "severity.number": plog.SeverityNumberInfo, "severity.text": "Info", @@ -188,7 +188,7 @@ var testLEventReq = &add_events.Event{ "dropped_attributes_count": float64(1), "flag.is_sampled": false, "flags": float64(plog.LogRecordFlags(0)), - "message": "OtelExporter - Log - This is a log message", + "message": "This is a log message", "scope.name": "", "severity.number": float64(plog.SeverityNumberInfo), "severity.text": "Info", From 1b05c7879264245b10cbfde0c653c2d59899190b Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 20 Jun 2023 10:07:23 +0200 Subject: [PATCH 12/37] Implement severity field handling and make sure we correctly map 
OTel severity to DataSet severity. Also remove "severity.text" and "severity.number" field from the event since it's redundant - we already have event severity (sev) field value. --- exporter/datasetexporter/logs_exporter.go | 77 ++++++++- .../datasetexporter/logs_exporter_test.go | 158 +++++++++++++++++- 2 files changed, 223 insertions(+), 12 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index 4f984760d309..b9479863f325 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "strconv" + "strings" "time" "github.com/scalyr/dataset-go/pkg/api/add_events" @@ -65,6 +66,73 @@ func buildBody(attrs map[string]interface{}, value pcommon.Value) string { return message } +// Function maps OTel severity on the LogRecord to DataSet severity level (number) +func otelSeverityToDataSetSeverity(log plog.LogRecord) int { + // If log record doesn't contain severity or we can't map it to a valid DataSet severity, + // we use this value (INFO) instead + defaultSeverityLevel := 3 + + // This function maps OTel severity level to DataSet severity levels + // + // Valid OTel levels - https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber + // and valid DataSet ones - https://github.com/scalyr/logstash-output-scalyr/blob/master/lib/logstash/outputs/scalyr.rb#L70 + sevNum := log.SeverityNumber() + sevText := log.SeverityText() + + dataSetSeverity := defaultSeverityLevel + + if sevNum > 0 { + // See https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber + // for OTEL mappings + switch sevNum { + case 1, 2, 3, 4: + // TRACE + dataSetSeverity = 1 + case 5, 6, 7, 8: + // DEBUG + dataSetSeverity = 2 + case 9, 10, 11, 12: + // INFO + dataSetSeverity = 3 + case 13, 14, 15, 16: + // WARN + dataSetSeverity = 4 + case 17, 18, 19, 20: + // ERROR + dataSetSeverity = 5 + case 21, 22, 23, 24: + // FATAL / 
CRITICAL / EMERGENCY + dataSetSeverity = 6 + } + } else if sevText != "" { + // Per docs, SeverityNumber is optional so if it's not present we fall back to SeverityText + // https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitytext + switch strings.ToLower(sevText) { + case "fine", "finest": + dataSetSeverity = 0 + case "trace": + dataSetSeverity = 1 + case "debug": + dataSetSeverity = 2 + case "info", "information": + dataSetSeverity = 3 + case "warn", "warning": + dataSetSeverity = 4 + case "error": + dataSetSeverity = 5 + case "fatal", "critical", "emergency": + dataSetSeverity = 6 + } + } + + // TODO: We should log in case we see invalid severity, but right now, afaik, we / OTEL + // don't have a concept of "rate limited" logging. We don't want to log every single + // occurrence in case there are many log records like that since this could cause a lot of + // noise and performance overhead + + return dataSetSeverity +} + func buildEventFromLog( log plog.LogRecord, resource pcommon.Resource, @@ -75,10 +143,8 @@ func buildEventFromLog( event := add_events.Event{} observedTs := log.ObservedTimestamp().AsTime() - if sevNum := log.SeverityNumber(); sevNum > 0 { - attrs["severity.number"] = sevNum - event.Sev = int(sevNum) - } + + event.Sev = otelSeverityToDataSetSeverity(log) if timestamp := log.Timestamp().AsTime(); !timestamp.Equal(time.Unix(0, 0)) { event.Ts = strconv.FormatInt(timestamp.UnixNano(), 10) @@ -93,9 +159,6 @@ func buildEventFromLog( if !observedTs.Equal(time.Unix(0, 0)) { attrs["observed.timestamp"] = observedTs.String() } - if sevText := log.SeverityText(); sevText != "" { - attrs["severity.text"] = sevText - } if span := log.SpanID().String(); span != "" { attrs["span_id"] = span } diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index 5c12495120b5..d37c66aaa0cc 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ 
b/exporter/datasetexporter/logs_exporter_test.go @@ -156,7 +156,7 @@ func TestBuildBodyMap(t *testing.T) { var testLEventRaw = &add_events.Event{ Thread: "TL", Log: "LL", - Sev: 9, + Sev: 3, Ts: "1581452773000000789", Attrs: map[string]interface{}{ "attributes.app": "server", @@ -168,8 +168,6 @@ var testLEventRaw = &add_events.Event{ "flags": plog.LogRecordFlags(0), "message": "This is a log message", "scope.name": "", - "severity.number": plog.SeverityNumberInfo, - "severity.text": "Info", "span_id": "0102040800000000", "trace_id": "08040201000000000000000000000000", }, @@ -190,8 +188,6 @@ var testLEventReq = &add_events.Event{ "flags": float64(plog.LogRecordFlags(0)), "message": "This is a log message", "scope.name": "", - "severity.number": float64(plog.SeverityNumberInfo), - "severity.text": "Info", "span_id": "0102040800000000", "trace_id": "08040201000000000000000000000000", "bundle_key": "d41d8cd98f00b204e9800998ecf8427e", @@ -407,3 +403,155 @@ func TestConsumeLogsShouldSucceed(t *testing.T) { addRequest, ) } + +func TestOtelSeverityToDataSetSeverityWithSeverityNumberNoSeverityText(t *testing.T) { + lr := testdata.GenerateLogsOneLogRecord() + ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + ld.SetSeverityText("") + + // trace + ld.SetSeverityNumber(1) + assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(2) + assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(3) + assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(4) + assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) + + // debug + ld.SetSeverityNumber(5) + assert.Equal(t, 2, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(6) + assert.Equal(t, 2, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(7) + assert.Equal(t, 2, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(8) + assert.Equal(t, 2, otelSeverityToDataSetSeverity(ld)) + + // info + ld.SetSeverityNumber(9) + 
assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(10) + assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(11) + assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(12) + assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + + // warn + ld.SetSeverityNumber(13) + assert.Equal(t, 4, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(14) + assert.Equal(t, 4, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(15) + assert.Equal(t, 4, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(16) + assert.Equal(t, 4, otelSeverityToDataSetSeverity(ld)) + + // error + ld.SetSeverityNumber(17) + assert.Equal(t, 5, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(18) + assert.Equal(t, 5, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(19) + assert.Equal(t, 5, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(20) + assert.Equal(t, 5, otelSeverityToDataSetSeverity(ld)) + + // fatal + ld.SetSeverityNumber(21) + assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(22) + assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(22) + assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(24) + assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + + // Invalid values get mapped to info (3) + ld.SetSeverityNumber(0) + assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(-1) + assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(25) + assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + +} + +func TestOtelSeverityToDataSetSeverityWithSeverityTextNoSeverityNumber(t *testing.T) { + lr := testdata.GenerateLogsOneLogRecord() + ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + ld.SetSeverityNumber(0) + + // trace + ld.SetSeverityText("trace") + assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) + + // 
debug + ld.SetSeverityText("debug") + assert.Equal(t, 2, otelSeverityToDataSetSeverity(ld)) + + // info + ld.SetSeverityText("info") + assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityText("informational") + assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + + // warn + ld.SetSeverityText("warn") + assert.Equal(t, 4, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityText("warning") + assert.Equal(t, 4, otelSeverityToDataSetSeverity(ld)) + + // error + ld.SetSeverityText("error") + assert.Equal(t, 5, otelSeverityToDataSetSeverity(ld)) + + // fatal + ld.SetSeverityText("fatal") + assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityText("critical") + assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityText("emergency") + assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) +} + +func TestOtelSeverityToDataSetSeverityWithSeverityNumberAndSeverityTextSeverityNumberHasPriority(t *testing.T) { + // If provided, SeverityNumber has priority over SeverityText + lr := testdata.GenerateLogsOneLogRecord() + ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + + ld.SetSeverityNumber(3) + ld.SetSeverityText("debug") + assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(22) + ld.SetSeverityText("info") + assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + +} From 4cd82589aea8b9ad63afab3ad98bbb968db7e3e4 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 21 Jun 2023 11:02:46 +0200 Subject: [PATCH 13/37] Remove flags and flags.is_sampled field since it provides no additional information which is not already available (events which are sampled will already be ingested and visible in the ui so flags.is_sampled is redundant). 
--- exporter/datasetexporter/logs_exporter.go | 2 -- exporter/datasetexporter/logs_exporter_test.go | 5 ----- 2 files changed, 7 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index b9479863f325..c3fb21c2fea9 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -182,8 +182,6 @@ func buildEventFromLog( } updateWithPrefixedValues(attrs, "attributes.", ".", log.Attributes().AsRaw(), 0) - attrs["flags"] = log.Flags() - attrs["flag.is_sampled"] = log.Flags().IsSampled() if settings.ExportResourceInfo { updateWithPrefixedValues(attrs, "resource.attributes.", ".", resource.Attributes().AsRaw(), 0) diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index d37c66aaa0cc..23d67caf3190 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -23,7 +23,6 @@ import ( "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/exporter/exportertest" "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" ) @@ -164,8 +163,6 @@ var testLEventRaw = &add_events.Event{ "body.str": "This is a log message", "body.type": "Str", "dropped_attributes_count": uint32(1), - "flag.is_sampled": false, - "flags": plog.LogRecordFlags(0), "message": "This is a log message", "scope.name": "", "span_id": "0102040800000000", @@ -184,8 +181,6 @@ var testLEventReq = &add_events.Event{ "body.str": "This is a log message", "body.type": "Str", "dropped_attributes_count": float64(1), - "flag.is_sampled": false, - "flags": float64(plog.LogRecordFlags(0)), "message": "This is a log message", "scope.name": "", "span_id": "0102040800000000", From a80ac482a04250e01c1a9527d6b86ded73531dd6 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus 
Date: Thu, 22 Jun 2023 20:17:08 +0200 Subject: [PATCH 14/37] Update metadata and indicate plugin is distributed as part of otel contrib. --- exporter/datasetexporter/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exporter/datasetexporter/metadata.yaml b/exporter/datasetexporter/metadata.yaml index 89a9faf47ea4..b7abd4cf08a6 100644 --- a/exporter/datasetexporter/metadata.yaml +++ b/exporter/datasetexporter/metadata.yaml @@ -4,4 +4,4 @@ status: class: exporter stability: alpha: [logs, traces] - distributions: [] + distributions: [contrib] From af8f0998eec149a16e4a0dd276187c7c60464da3 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 22 Jun 2023 20:19:22 +0200 Subject: [PATCH 15/37] Move default severity to be a module level constant instead of function level constant. --- exporter/datasetexporter/logs_exporter.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index c3fb21c2fea9..b8d166a64485 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -20,6 +20,10 @@ import ( var now = time.Now +// If a LogRecord doesn't contain severity or we can't map it to a valid DataSet severity, we use +// this value (3 - INFO) instead +var defaultSeverityLevel = 3 + func createLogsExporter(ctx context.Context, set exporter.CreateSettings, config component.Config) (exporter.Logs, error) { cfg := castConfig(config) e, err := newDatasetExporter("logs", cfg, set.Logger) @@ -68,10 +72,6 @@ func buildBody(attrs map[string]interface{}, value pcommon.Value) string { // Function maps OTel severity on the LogRecord to DataSet severity level (number) func otelSeverityToDataSetSeverity(log plog.LogRecord) int { - // If log record doesn't contain severity or we can't map it to a valid DataSet severity, - // we use this value (INFO) instead - defaultSeverityLevel := 3 - // This function maps OTel 
severity level to DataSet severity levels // // Valid OTel levels - https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber From 1e17c4a91d9ae11658e7bbbf0de29fcc3a8256a6 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 22 Jun 2023 20:30:12 +0200 Subject: [PATCH 16/37] Refactor / split otelSeverityToDataSetSeverity to two functions. --- exporter/datasetexporter/logs_exporter.go | 106 ++++++++++++++-------- 1 file changed, 66 insertions(+), 40 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index b8d166a64485..89515cc3bffa 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -22,7 +22,7 @@ var now = time.Now // If a LogRecord doesn't contain severity or we can't map it to a valid DataSet severity, we use // this value (3 - INFO) instead -var defaultSeverityLevel = 3 +var defaultDataSetSeverityLevel = 3 func createLogsExporter(ctx context.Context, set exporter.CreateSettings, config component.Config) (exporter.Logs, error) { cfg := castConfig(config) @@ -79,50 +79,14 @@ func otelSeverityToDataSetSeverity(log plog.LogRecord) int { sevNum := log.SeverityNumber() sevText := log.SeverityText() - dataSetSeverity := defaultSeverityLevel + dataSetSeverity := defaultDataSetSeverityLevel if sevNum > 0 { - // See https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber - // for OTEL mappings - switch sevNum { - case 1, 2, 3, 4: - // TRACE - dataSetSeverity = 1 - case 5, 6, 7, 8: - // DEBUG - dataSetSeverity = 2 - case 9, 10, 11, 12: - // INFO - dataSetSeverity = 3 - case 13, 14, 15, 16: - // WARN - dataSetSeverity = 4 - case 17, 18, 19, 20: - // ERROR - dataSetSeverity = 5 - case 21, 22, 23, 24: - // FATAL / CRITICAL / EMERGENCY - dataSetSeverity = 6 - } + dataSetSeverity = logRecordSevNumToDataSetSeverity(sevNum) } else if sevText != "" { // Per docs, SeverityNumber is optional so if it's not present we fall 
back to SeverityText // https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitytext - switch strings.ToLower(sevText) { - case "fine", "finest": - dataSetSeverity = 0 - case "trace": - dataSetSeverity = 1 - case "debug": - dataSetSeverity = 2 - case "info", "information": - dataSetSeverity = 3 - case "warn", "warning": - dataSetSeverity = 4 - case "error": - dataSetSeverity = 5 - case "fatal", "critical", "emergency": - dataSetSeverity = 6 - } + dataSetSeverity = logRecordSeverityTextToDataSetSeverity(sevText) } // TODO: We should log in case we see invalid severity, but right now, afaik, we / OTEL @@ -133,6 +97,68 @@ func otelSeverityToDataSetSeverity(log plog.LogRecord) int { return dataSetSeverity } +func logRecordSevNumToDataSetSeverity(sevNum plog.SeverityNumber) int { + // Maps LogRecord.SeverityNumber field value to DataSet severity value. + dataSetSeverity := defaultDataSetSeverityLevel + + if sevNum <= 0 { + return defaultDataSetSeverityLevel + } + + // See https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber + // for OTEL mappings + switch sevNum { + case 1, 2, 3, 4: + // TRACE + dataSetSeverity = 1 + case 5, 6, 7, 8: + // DEBUG + dataSetSeverity = 2 + case 9, 10, 11, 12: + // INFO + dataSetSeverity = 3 + case 13, 14, 15, 16: + // WARN + dataSetSeverity = 4 + case 17, 18, 19, 20: + // ERROR + dataSetSeverity = 5 + case 21, 22, 23, 24: + // FATAL / CRITICAL / EMERGENCY + dataSetSeverity = 6 + } + + return dataSetSeverity +} + +func logRecordSeverityTextToDataSetSeverity(sevText string) int { + // Maps LogRecord.SeverityText field value to DataSet severity value. 
+ dataSetSeverity := defaultDataSetSeverityLevel + + if sevText == "" { + return dataSetSeverity + } + + switch strings.ToLower(sevText) { + case "fine", "finest": + dataSetSeverity = 0 + case "trace": + dataSetSeverity = 1 + case "debug": + dataSetSeverity = 2 + case "info", "information": + dataSetSeverity = 3 + case "warn", "warning": + dataSetSeverity = 4 + case "error": + dataSetSeverity = 5 + case "fatal", "critical", "emergency": + dataSetSeverity = 6 + } + + return dataSetSeverity +} + func buildEventFromLog( log plog.LogRecord, resource pcommon.Resource, From 79120e7c57ff83bfbc265695e601befff62a1dee Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 22 Jun 2023 20:33:34 +0200 Subject: [PATCH 17/37] Also add test cases for invalid values. --- exporter/datasetexporter/logs_exporter_test.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index 23d67caf3190..b429e25f412a 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -404,6 +404,16 @@ func TestOtelSeverityToDataSetSeverityWithSeverityNumberNoSeverityText(t *testin ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) ld.SetSeverityText("") + // invalid values + ld.SetSeverityNumber(0) + assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(-1) + assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityNumber(100) + assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) + // trace ld.SetSeverityNumber(1) assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) @@ -499,6 +509,13 @@ func TestOtelSeverityToDataSetSeverityWithSeverityTextNoSeverityNumber(t *testin ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) ld.SetSeverityNumber(0) + // invalid values + ld.SetSeverityText("a") + 
assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) + + ld.SetSeverityText("infofoo") + assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) + // trace ld.SetSeverityText("trace") assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) From f20f199c1cf73a2962fff083b3b72b3a2cd612cf Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 22 Jun 2023 20:35:50 +0200 Subject: [PATCH 18/37] Make it a module level constant. --- exporter/datasetexporter/logs_exporter.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index 89515cc3bffa..72bb73c1062e 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -22,7 +22,7 @@ var now = time.Now // If a LogRecord doesn't contain severity or we can't map it to a valid DataSet severity, we use // this value (3 - INFO) instead -var defaultDataSetSeverityLevel = 3 +const defaultDataSetSeverityLevel int = 3 func createLogsExporter(ctx context.Context, set exporter.CreateSettings, config component.Config) (exporter.Logs, error) { cfg := castConfig(config) @@ -102,7 +102,7 @@ func logRecordSevNumToDataSetSeverity(sevNum plog.SeverityNumber) int { dataSetSeverity := defaultDataSetSeverityLevel if sevNum <= 0 { - return defaultDataSetSeverityLevel + return dataSetSeverity } // See https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber // for OTEL mappings From 206c8eb658f13e709faefca20f6ee5a7b4f532a8 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 22 Jun 2023 20:41:20 +0200 Subject: [PATCH 19/37] Make dataset log levels constants for easier readability and maintenance and update affected code and tests.
--- exporter/datasetexporter/logs_exporter.go | 39 ++++++---- .../datasetexporter/logs_exporter_test.go | 72 +++++++++---------- 2 files changed, 61 insertions(+), 50 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index 72bb73c1062e..627f582ac29c 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -22,7 +22,18 @@ var now = time.Now // If a LogRecord doesn't contain severity or we can't map it to a valid DataSet severity, we use // this value (3 - INFO) instead -const defaultDataSetSeverityLevel int = 3 +const defaultDataSetSeverityLevel int = dataSetLogLevelInfo + +// Constants for valid DataSet log levels (aka Event.sev int field value) +const ( + dataSetLogLevelFinest = 0 + dataSetLogLevelTrace = 1 + dataSetLogLevelDebug = 2 + dataSetLogLevelInfo = 3 + dataSetLogLevelWarn = 4 + dataSetLogLevelError = 5 + dataSetLogLevelFatal = 6 +) func createLogsExporter(ctx context.Context, set exporter.CreateSettings, config component.Config) (exporter.Logs, error) { cfg := castConfig(config) @@ -110,22 +121,22 @@ func logRecordSevNumToDataSetSeverity(sevNum plog.SeverityNumber) int { switch sevNum { case 1, 2, 3, 4: // TRACE - dataSetSeverity = 1 + dataSetSeverity = dataSetLogLevelTrace case 5, 6, 7, 8: // DEBUG - dataSetSeverity = 2 + dataSetSeverity = dataSetLogLevelDebug case 9, 10, 11, 12: // INFO - dataSetSeverity = 3 + dataSetSeverity = dataSetLogLevelInfo case 13, 14, 15, 16: // WARN - dataSetSeverity = 4 + dataSetSeverity = dataSetLogLevelWarn case 17, 18, 19, 20: // ERROR - dataSetSeverity = 5 + dataSetSeverity = dataSetLogLevelError case 21, 22, 23, 24: // FATAL / CRITICAL / EMERGENCY - dataSetSeverity = 6 + dataSetSeverity = dataSetLogLevelFatal } return dataSetSeverity @@ -141,19 +152,19 @@ func logRecordSeverityTextToDataSetSeverity(sevText string) int { switch strings.ToLower(sevText) { case "fine", "finest": - dataSetSeverity = 0 + 
dataSetSeverity = dataSetLogLevelFinest case "trace": - dataSetSeverity = 1 + dataSetSeverity = dataSetLogLevelTrace case "debug": - dataSetSeverity = 2 + dataSetSeverity = dataSetLogLevelDebug case "info", "information": - dataSetSeverity = 3 + dataSetSeverity = dataSetLogLevelInfo case "warn", "warning": - dataSetSeverity = 4 + dataSetSeverity = dataSetLogLevelWarn case "error": - dataSetSeverity = 5 + dataSetSeverity = dataSetLogLevelError case "fatal", "critical", "emergency": - dataSetSeverity = 6 + dataSetSeverity = dataSetLogLevelFatal } return dataSetSeverity diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index b429e25f412a..7ad7545df1e3 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -416,88 +416,88 @@ func TestOtelSeverityToDataSetSeverityWithSeverityNumberNoSeverityText(t *testin // trace ld.SetSeverityNumber(1) - assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelTrace, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(2) - assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelTrace, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(3) - assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelTrace, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(4) - assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelTrace, otelSeverityToDataSetSeverity(ld)) // debug ld.SetSeverityNumber(5) - assert.Equal(t, 2, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelDebug, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(6) - assert.Equal(t, 2, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelDebug, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(7) - assert.Equal(t, 2, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelDebug, 
otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(8) - assert.Equal(t, 2, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelDebug, otelSeverityToDataSetSeverity(ld)) // info ld.SetSeverityNumber(9) - assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelInfo, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(10) - assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelInfo, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(11) - assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelInfo, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(12) - assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelInfo, otelSeverityToDataSetSeverity(ld)) // warn ld.SetSeverityNumber(13) - assert.Equal(t, 4, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelWarn, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(14) - assert.Equal(t, 4, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelWarn, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(15) - assert.Equal(t, 4, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelWarn, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(16) - assert.Equal(t, 4, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelWarn, otelSeverityToDataSetSeverity(ld)) // error ld.SetSeverityNumber(17) - assert.Equal(t, 5, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelError, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(18) - assert.Equal(t, 5, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelError, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(19) - assert.Equal(t, 5, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelError, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(20) - assert.Equal(t, 5, 
otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelError, otelSeverityToDataSetSeverity(ld)) // fatal ld.SetSeverityNumber(21) - assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(22) - assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(22) - assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(24) - assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) // Invalid values get mapped to info (3) ld.SetSeverityNumber(0) - assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(-1) - assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(25) assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) @@ -518,39 +518,39 @@ func TestOtelSeverityToDataSetSeverityWithSeverityTextNoSeverityNumber(t *testin // trace ld.SetSeverityText("trace") - assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelTrace, otelSeverityToDataSetSeverity(ld)) // debug ld.SetSeverityText("debug") - assert.Equal(t, 2, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelDebug, otelSeverityToDataSetSeverity(ld)) // info ld.SetSeverityText("info") - assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelInfo, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityText("informational") - assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelInfo, otelSeverityToDataSetSeverity(ld)) // warn ld.SetSeverityText("warn") - 
assert.Equal(t, 4, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelWarn, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityText("warning") - assert.Equal(t, 4, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelWarn, otelSeverityToDataSetSeverity(ld)) // error ld.SetSeverityText("error") - assert.Equal(t, 5, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelError, otelSeverityToDataSetSeverity(ld)) // fatal ld.SetSeverityText("fatal") - assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityText("critical") - assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) ld.SetSeverityText("emergency") - assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) } func TestOtelSeverityToDataSetSeverityWithSeverityNumberAndSeverityTextSeverityNumberHasPriority(t *testing.T) { From 21c07bef422bf7dd8140593b19da745ebc4254b6 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 23 Jun 2023 13:40:30 +0200 Subject: [PATCH 20/37] Re-generate readme. --- exporter/datasetexporter/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/exporter/datasetexporter/README.md b/exporter/datasetexporter/README.md index ceef4497e2ea..dcf867a07713 100644 --- a/exporter/datasetexporter/README.md +++ b/exporter/datasetexporter/README.md @@ -4,9 +4,10 @@ | Status | | | ------------- |-----------| | Stability | [alpha]: logs, traces | -| Distributions | [] | +| Distributions | [contrib] | [alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha +[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib This exporter sends logs to [DataSet](https://www.dataset.com/). 
From 211db7e388fb3e45cf66a9fb3179d606b1cc3788 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 23 Jun 2023 15:44:25 +0200 Subject: [PATCH 21/37] Per PR review, add "map" suffix to the function names. --- exporter/datasetexporter/logs_exporter.go | 12 +-- .../datasetexporter/logs_exporter_test.go | 88 +++++++++---------- 2 files changed, 50 insertions(+), 50 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index 627f582ac29c..f3b3a379ed1b 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -82,7 +82,7 @@ func buildBody(attrs map[string]interface{}, value pcommon.Value) string { } // Function maps OTel severity on the LogRecord to DataSet severity level (number) -func otelSeverityToDataSetSeverity(log plog.LogRecord) int { +func mapOtelSeverityToDataSetSeverity(log plog.LogRecord) int { // This function maps OTel severity level to DataSet severity levels // // Valid OTel levels - https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber @@ -93,11 +93,11 @@ func otelSeverityToDataSetSeverity(log plog.LogRecord) int { dataSetSeverity := defaultDataSetSeverityLevel if sevNum > 0 { - dataSetSeverity = logRecordSevNumToDataSetSeverity(sevNum) + dataSetSeverity = mapLogRecordSevNumToDataSetSeverity(sevNum) } else if sevText != "" { // Per docs, SeverityNumber is optional so if it's not present we fall back to SeverityText // https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitytext - dataSetSeverity = logRecordSeverityTextToDataSetSeverity(sevText) + dataSetSeverity = mapLogRecordSeverityTextToDataSetSeverity(sevText) } // TODO: We should log in case we see invalid severity, but right now, afaik, we / OTEL @@ -108,7 +108,7 @@ func otelSeverityToDataSetSeverity(log plog.LogRecord) int { return dataSetSeverity } -func logRecordSevNumToDataSetSeverity(sevNum plog.SeverityNumber) int { +func 
mapLogRecordSevNumToDataSetSeverity(sevNum plog.SeverityNumber) int { // Maps LogRecord.SeverityNumber field value to DataSet severity value. dataSetSeverity := defaultDataSetSeverityLevel @@ -142,7 +142,7 @@ func logRecordSevNumToDataSetSeverity(sevNum plog.SeverityNumber) int { return dataSetSeverity } -func logRecordSeverityTextToDataSetSeverity(sevText string) int { +func mapLogRecordSeverityTextToDataSetSeverity(sevText string) int { // Maps LogRecord.SeverityText field value to DataSet severity value. dataSetSeverity := defaultDataSetSeverityLevel @@ -181,7 +181,7 @@ func buildEventFromLog( observedTs := log.ObservedTimestamp().AsTime() - event.Sev = otelSeverityToDataSetSeverity(log) + event.Sev = mapOtelSeverityToDataSetSeverity(log) if timestamp := log.Timestamp().AsTime(); !timestamp.Equal(time.Unix(0, 0)) { event.Ts = strconv.FormatInt(timestamp.UnixNano(), 10) diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index 7ad7545df1e3..c52b284ce9a5 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -406,101 +406,101 @@ func TestOtelSeverityToDataSetSeverityWithSeverityNumberNoSeverityText(t *testin // invalid values ld.SetSeverityNumber(0) - assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(-1) - assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(100) - assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) // trace ld.SetSeverityNumber(1) - assert.Equal(t, dataSetLogLevelTrace, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelTrace, 
mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(2) - assert.Equal(t, dataSetLogLevelTrace, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelTrace, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(3) - assert.Equal(t, dataSetLogLevelTrace, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelTrace, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(4) - assert.Equal(t, dataSetLogLevelTrace, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelTrace, mapOtelSeverityToDataSetSeverity(ld)) // debug ld.SetSeverityNumber(5) - assert.Equal(t, dataSetLogLevelDebug, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelDebug, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(6) - assert.Equal(t, dataSetLogLevelDebug, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelDebug, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(7) - assert.Equal(t, dataSetLogLevelDebug, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelDebug, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(8) - assert.Equal(t, dataSetLogLevelDebug, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelDebug, mapOtelSeverityToDataSetSeverity(ld)) // info ld.SetSeverityNumber(9) - assert.Equal(t, dataSetLogLevelInfo, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelInfo, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(10) - assert.Equal(t, dataSetLogLevelInfo, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelInfo, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(11) - assert.Equal(t, dataSetLogLevelInfo, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelInfo, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(12) - assert.Equal(t, dataSetLogLevelInfo, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelInfo, mapOtelSeverityToDataSetSeverity(ld)) // 
warn ld.SetSeverityNumber(13) - assert.Equal(t, dataSetLogLevelWarn, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelWarn, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(14) - assert.Equal(t, dataSetLogLevelWarn, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelWarn, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(15) - assert.Equal(t, dataSetLogLevelWarn, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelWarn, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(16) - assert.Equal(t, dataSetLogLevelWarn, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelWarn, mapOtelSeverityToDataSetSeverity(ld)) // error ld.SetSeverityNumber(17) - assert.Equal(t, dataSetLogLevelError, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelError, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(18) - assert.Equal(t, dataSetLogLevelError, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelError, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(19) - assert.Equal(t, dataSetLogLevelError, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelError, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(20) - assert.Equal(t, dataSetLogLevelError, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelError, mapOtelSeverityToDataSetSeverity(ld)) // fatal ld.SetSeverityNumber(21) - assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(22) - assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(22) - assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(24) - 
assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) // Invalid values get mapped to info (3) ld.SetSeverityNumber(0) - assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(-1) - assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(25) - assert.Equal(t, 3, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, 3, mapOtelSeverityToDataSetSeverity(ld)) } @@ -511,46 +511,46 @@ func TestOtelSeverityToDataSetSeverityWithSeverityTextNoSeverityNumber(t *testin // invalid values ld.SetSeverityText("a") - assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityText("infofoo") - assert.Equal(t, defaultDataSetSeverityLevel, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) // trace ld.SetSeverityText("trace") - assert.Equal(t, dataSetLogLevelTrace, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelTrace, mapOtelSeverityToDataSetSeverity(ld)) // debug ld.SetSeverityText("debug") - assert.Equal(t, dataSetLogLevelDebug, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelDebug, mapOtelSeverityToDataSetSeverity(ld)) // info ld.SetSeverityText("info") - assert.Equal(t, dataSetLogLevelInfo, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelInfo, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityText("informational") - assert.Equal(t, dataSetLogLevelInfo, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelInfo, mapOtelSeverityToDataSetSeverity(ld)) // warn 
ld.SetSeverityText("warn") - assert.Equal(t, dataSetLogLevelWarn, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelWarn, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityText("warning") - assert.Equal(t, dataSetLogLevelWarn, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelWarn, mapOtelSeverityToDataSetSeverity(ld)) // error ld.SetSeverityText("error") - assert.Equal(t, dataSetLogLevelError, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelError, mapOtelSeverityToDataSetSeverity(ld)) // fatal ld.SetSeverityText("fatal") - assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityText("critical") - assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityText("emergency") - assert.Equal(t, dataSetLogLevelFatal, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) } func TestOtelSeverityToDataSetSeverityWithSeverityNumberAndSeverityTextSeverityNumberHasPriority(t *testing.T) { @@ -560,10 +560,10 @@ func TestOtelSeverityToDataSetSeverityWithSeverityNumberAndSeverityTextSeverityN ld.SetSeverityNumber(3) ld.SetSeverityText("debug") - assert.Equal(t, 1, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, 1, mapOtelSeverityToDataSetSeverity(ld)) ld.SetSeverityNumber(22) ld.SetSeverityText("info") - assert.Equal(t, 6, otelSeverityToDataSetSeverity(ld)) + assert.Equal(t, 6, mapOtelSeverityToDataSetSeverity(ld)) } From 656cc5fccdbc92d74bcaad1b4c9bb7e387ac11b9 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 23 Jun 2023 16:00:33 +0200 Subject: [PATCH 22/37] For readability, split different test scenarios in different test functions. Also move functionality for making a test record into a utility function. 
--- .../datasetexporter/logs_exporter_test.go | 148 ++++++++++-------- 1 file changed, 82 insertions(+), 66 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index c52b284ce9a5..92824824df10 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -23,6 +23,7 @@ import ( "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/exporter/exportertest" "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" ) @@ -399,171 +400,186 @@ func TestConsumeLogsShouldSucceed(t *testing.T) { ) } -func TestOtelSeverityToDataSetSeverityWithSeverityNumberNoSeverityText(t *testing.T) { +func makeLogRecordWithSeverityNumberAndSeverityText(sevNum int, sevText string) plog.LogRecord { lr := testdata.GenerateLogsOneLogRecord() ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) - ld.SetSeverityText("") - // invalid values - ld.SetSeverityNumber(0) + ld.SetSeverityNumber(plog.SeverityNumber(sevNum)) + ld.SetSeverityText(sevText) + + return ld +} + +func TestOtelSeverityToDataSetSeverityWithSeverityNumberNoSeverityTextInvalidValues(t *testing.T) { + // Invalid values get mapped to info (3 - INFO) + ld := makeLogRecordWithSeverityNumberAndSeverityText(0, "") assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(-1) + ld = makeLogRecordWithSeverityNumberAndSeverityText(-1, "") assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(100) + ld = makeLogRecordWithSeverityNumberAndSeverityText(25, "") assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) + ld = makeLogRecordWithSeverityNumberAndSeverityText(100, "") + assert.Equal(t, defaultDataSetSeverityLevel, 
mapOtelSeverityToDataSetSeverity(ld)) + +} + +func TestOtelSeverityToDataSetSeverityWithSeverityNumberNoSeverityTextDataSetTraceLogLevel(t *testing.T) { // trace - ld.SetSeverityNumber(1) + ld := makeLogRecordWithSeverityNumberAndSeverityText(1, "") assert.Equal(t, dataSetLogLevelTrace, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(2) + ld = makeLogRecordWithSeverityNumberAndSeverityText(2, "") assert.Equal(t, dataSetLogLevelTrace, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(3) + ld = makeLogRecordWithSeverityNumberAndSeverityText(3, "") assert.Equal(t, dataSetLogLevelTrace, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(4) + ld = makeLogRecordWithSeverityNumberAndSeverityText(4, "") assert.Equal(t, dataSetLogLevelTrace, mapOtelSeverityToDataSetSeverity(ld)) +} +func TestOtelSeverityToDataSetSeverityWithSeverityNumberNoSeverityTextDataSetDebugLogLevel(t *testing.T) { // debug - ld.SetSeverityNumber(5) + ld := makeLogRecordWithSeverityNumberAndSeverityText(5, "") assert.Equal(t, dataSetLogLevelDebug, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(6) + ld = makeLogRecordWithSeverityNumberAndSeverityText(6, "") assert.Equal(t, dataSetLogLevelDebug, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(7) + ld = makeLogRecordWithSeverityNumberAndSeverityText(7, "") assert.Equal(t, dataSetLogLevelDebug, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(8) + ld = makeLogRecordWithSeverityNumberAndSeverityText(8, "") assert.Equal(t, dataSetLogLevelDebug, mapOtelSeverityToDataSetSeverity(ld)) +} +func TestOtelSeverityToDataSetSeverityWithSeverityNumberNoSeverityTextDataSetInfoLogLevel(t *testing.T) { // info - ld.SetSeverityNumber(9) + ld := makeLogRecordWithSeverityNumberAndSeverityText(9, "") assert.Equal(t, dataSetLogLevelInfo, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(10) + ld = makeLogRecordWithSeverityNumberAndSeverityText(10, "") assert.Equal(t, dataSetLogLevelInfo, 
mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(11) + ld = makeLogRecordWithSeverityNumberAndSeverityText(11, "") assert.Equal(t, dataSetLogLevelInfo, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(12) + ld = makeLogRecordWithSeverityNumberAndSeverityText(12, "") assert.Equal(t, dataSetLogLevelInfo, mapOtelSeverityToDataSetSeverity(ld)) +} +func TestOtelSeverityToDataSetSeverityWithSeverityNumberNoSeverityTextDataSetWarnLogLevel(t *testing.T) { // warn - ld.SetSeverityNumber(13) + ld := makeLogRecordWithSeverityNumberAndSeverityText(13, "") assert.Equal(t, dataSetLogLevelWarn, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(14) + ld = makeLogRecordWithSeverityNumberAndSeverityText(14, "") assert.Equal(t, dataSetLogLevelWarn, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(15) + ld = makeLogRecordWithSeverityNumberAndSeverityText(15, "") assert.Equal(t, dataSetLogLevelWarn, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(16) + ld = makeLogRecordWithSeverityNumberAndSeverityText(16, "") assert.Equal(t, dataSetLogLevelWarn, mapOtelSeverityToDataSetSeverity(ld)) +} +func TestOtelSeverityToDataSetSeverityWithSeverityNumberNoSeverityTextDataSetErrorLogLevel(t *testing.T) { // error - ld.SetSeverityNumber(17) + ld := makeLogRecordWithSeverityNumberAndSeverityText(17, "") assert.Equal(t, dataSetLogLevelError, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(18) + ld = makeLogRecordWithSeverityNumberAndSeverityText(18, "") assert.Equal(t, dataSetLogLevelError, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(19) + ld = makeLogRecordWithSeverityNumberAndSeverityText(19, "") assert.Equal(t, dataSetLogLevelError, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(20) + ld = makeLogRecordWithSeverityNumberAndSeverityText(20, "") assert.Equal(t, dataSetLogLevelError, mapOtelSeverityToDataSetSeverity(ld)) +} +func 
TestOtelSeverityToDataSetSeverityWithSeverityNumberNoSeverityTextDataSetFatalLogLevel(t *testing.T) { // fatal - ld.SetSeverityNumber(21) + ld := makeLogRecordWithSeverityNumberAndSeverityText(21, "") assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) + ld = makeLogRecordWithSeverityNumberAndSeverityText(22, "") ld.SetSeverityNumber(22) assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) + ld = makeLogRecordWithSeverityNumberAndSeverityText(23, "") ld.SetSeverityNumber(22) assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityNumber(24) + ld = makeLogRecordWithSeverityNumberAndSeverityText(24, "") assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) - - // Invalid values get mapped to info (3) - ld.SetSeverityNumber(0) - assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) - - ld.SetSeverityNumber(-1) - assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) - - ld.SetSeverityNumber(25) - assert.Equal(t, 3, mapOtelSeverityToDataSetSeverity(ld)) - } -func TestOtelSeverityToDataSetSeverityWithSeverityTextNoSeverityNumber(t *testing.T) { - lr := testdata.GenerateLogsOneLogRecord() - ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) - ld.SetSeverityNumber(0) - +func TestOtelSeverityToDataSetSeverityWithSeverityTextNoSeverityNumberInvalidValues(t *testing.T) { // invalid values - ld.SetSeverityText("a") + ld := makeLogRecordWithSeverityNumberAndSeverityText(0, "a") assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityText("infofoo") + ld = makeLogRecordWithSeverityNumberAndSeverityText(0, "infoinfo") assert.Equal(t, defaultDataSetSeverityLevel, mapOtelSeverityToDataSetSeverity(ld)) +} +func TestOtelSeverityToDataSetSeverityWithSeverityTextNoSeverityNumberDataSetTraceLogLevel(t *testing.T) { // trace - ld.SetSeverityText("trace") + ld := 
makeLogRecordWithSeverityNumberAndSeverityText(0, "trace") assert.Equal(t, dataSetLogLevelTrace, mapOtelSeverityToDataSetSeverity(ld)) +} +func TestOtelSeverityToDataSetSeverityWithSeverityTextNoSeverityNumberDataSetDebugLogLevel(t *testing.T) { // debug - ld.SetSeverityText("debug") + ld := makeLogRecordWithSeverityNumberAndSeverityText(0, "debug") assert.Equal(t, dataSetLogLevelDebug, mapOtelSeverityToDataSetSeverity(ld)) +} +func TestOtelSeverityToDataSetSeverityWithSeverityTextNoSeverityNumberDataSetInfoLogLevel(t *testing.T) { // info - ld.SetSeverityText("info") + ld := makeLogRecordWithSeverityNumberAndSeverityText(0, "info") assert.Equal(t, dataSetLogLevelInfo, mapOtelSeverityToDataSetSeverity(ld)) + ld = makeLogRecordWithSeverityNumberAndSeverityText(0, "informational") ld.SetSeverityText("informational") - assert.Equal(t, dataSetLogLevelInfo, mapOtelSeverityToDataSetSeverity(ld)) +} +func TestOtelSeverityToDataSetSeverityWithSeverityTextNoSeverityNumberDataSetInfoWarnLevel(t *testing.T) { // warn - ld.SetSeverityText("warn") + ld := makeLogRecordWithSeverityNumberAndSeverityText(0, "warn") assert.Equal(t, dataSetLogLevelWarn, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityText("warning") + ld = makeLogRecordWithSeverityNumberAndSeverityText(0, "warning") assert.Equal(t, dataSetLogLevelWarn, mapOtelSeverityToDataSetSeverity(ld)) +} +func TestOtelSeverityToDataSetSeverityWithSeverityTextNoSeverityNumberDataSetInfoErrorLevel(t *testing.T) { // error - ld.SetSeverityText("error") + ld := makeLogRecordWithSeverityNumberAndSeverityText(0, "error") assert.Equal(t, dataSetLogLevelError, mapOtelSeverityToDataSetSeverity(ld)) +} +func TestOtelSeverityToDataSetSeverityWithSeverityTextNoSeverityNumberDataSetInfoFatalLevel(t *testing.T) { // fatal - ld.SetSeverityText("fatal") + ld := makeLogRecordWithSeverityNumberAndSeverityText(0, "fatal") assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityText("critical") + ld = 
makeLogRecordWithSeverityNumberAndSeverityText(0, "fatal") assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) - ld.SetSeverityText("emergency") + ld = makeLogRecordWithSeverityNumberAndSeverityText(0, "emergency") assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) } func TestOtelSeverityToDataSetSeverityWithSeverityNumberAndSeverityTextSeverityNumberHasPriority(t *testing.T) { // If provided, SeverityNumber has priority over SeverityText - lr := testdata.GenerateLogsOneLogRecord() - ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) - - ld.SetSeverityNumber(3) - ld.SetSeverityText("debug") - assert.Equal(t, 1, mapOtelSeverityToDataSetSeverity(ld)) - - ld.SetSeverityNumber(22) - ld.SetSeverityText("info") - assert.Equal(t, 6, mapOtelSeverityToDataSetSeverity(ld)) + ld := makeLogRecordWithSeverityNumberAndSeverityText(3, "debug") + assert.Equal(t, dataSetLogLevelTrace, mapOtelSeverityToDataSetSeverity(ld)) + ld = makeLogRecordWithSeverityNumberAndSeverityText(22, "info") + assert.Equal(t, dataSetLogLevelFatal, mapOtelSeverityToDataSetSeverity(ld)) } From 401041db3ba12e560b044f97ce7d16dc770bc606 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 23 Jun 2023 16:29:24 +0200 Subject: [PATCH 23/37] Add changelog entry file. --- ...porter-various-improvements-and-fixes.yaml | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 .chloggen/dataset-exporter-various-improvements-and-fixes.yaml diff --git a/.chloggen/dataset-exporter-various-improvements-and-fixes.yaml b/.chloggen/dataset-exporter-various-improvements-and-fixes.yaml new file mode 100644 index 000000000000..62207ac59d7d --- /dev/null +++ b/.chloggen/dataset-exporter-various-improvements-and-fixes.yaml @@ -0,0 +1,20 @@ +# Use this changelog template to create an entry for release notes. 
+# If your change doesn't affect end users, such as a test fix or a tooling change, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: exporter/datasetexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Correctly map LogRecord severity to DataSet severity, remove redundant DataSet event message field prefix (OtelExporter - Log -) and remove redundant DataSet event fields (flags, flags.is_sampled)." + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [20660] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: From ae0730fe5fe6ef01bb448ccb23bb71f210bc522e Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 27 Jun 2023 11:02:52 +0200 Subject: [PATCH 24/37] Use consistent name for "observed time" field and also make sure it's consistently formatted as nanoseconds since unix epoch. 
--- exporter/datasetexporter/logs_exporter.go | 2 +- exporter/datasetexporter/logs_exporter_test.go | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index c0a69107f56f..99aefddec510 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -194,7 +194,7 @@ func buildEventFromLog( attrs["dropped_attributes_count"] = dropped } if !observedTs.Equal(time.Unix(0, 0)) { - attrs["observed.timestamp"] = observedTs.String() + attrs["sca:observedTimestamp"] = strconv.FormatInt(observedTs.UnixNano(), 10) } if span := log.SpanID().String(); span != "" { attrs["span_id"] = span diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index 92824824df10..91147b494d98 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -259,7 +259,8 @@ func TestBuildEventFromLogEventWithoutTimestampWithObservedTimestampUseObservedT ld.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Unix(1686235113, 0))) testLEventRaw.Ts = "1686235113000000000" - testLEventRaw.Attrs["observed.timestamp"] = "2023-06-08 14:38:33 +0000 UTC" + // 2023-06-08 14:38:33 +0000 UTC + testLEventRaw.Attrs["sca:observedTimestamp"] = "1686235113000000000" delete(testLEventRaw.Attrs, "timestamp") delete(testLEventRaw.Attrs, "resource.attributes.resource-attr") @@ -296,7 +297,7 @@ func TestBuildEventFromLogEventWithoutTimestampWithOutObservedTimestampUseCurren testLEventRaw.Ts = strconv.FormatInt(currentTime.UnixNano(), 10) delete(testLEventRaw.Attrs, "timestamp") - delete(testLEventRaw.Attrs, "observed.timestamp") + delete(testLEventRaw.Attrs, "sca:observedTimestamp") delete(testLEventRaw.Attrs, "resource.attributes.resource-attr") expected := &add_events.EventBundle{ From 11a425f24dc54b6015778a01c3a133a70dbda1e7 Mon Sep 17 00:00:00 2001 From: Tomaz 
Muraus Date: Tue, 27 Jun 2023 11:24:41 +0200 Subject: [PATCH 25/37] Update body / message field handling - we don't want to utilize "body.type" and "body.value" convention since this is not used by any other integration and may cause confusion. For now, we still decompose map into multiple attributes, but we may want to change that in the future as well for consistency reasons (in other integrations we don't do that - it either happens as part of a server side parser or client side processor / similar). --- exporter/datasetexporter/logs_exporter.go | 24 ++----- .../datasetexporter/logs_exporter_test.go | 69 +++++++------------ 2 files changed, 33 insertions(+), 60 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index 99aefddec510..343e659bc194 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -56,26 +56,16 @@ func createLogsExporter(ctx context.Context, set exporter.CreateSettings, config func buildBody(attrs map[string]interface{}, value pcommon.Value) string { message := value.AsString() - attrs["body.type"] = value.Type().String() + + // We already store value as part of a message field so there is no need to store it twice + // and also store the type which is dataset anti-pattern right now. We only handle it + // specially for map type where we decompose map into multiple attributes. Even though this + // may also not be desired in the future since other integrations don't handle it that way, + // but leave it up to the user to handle that (e.g. as part of a server side parser or otel + // collector attribute processor or similar). 
switch value.Type() { - case pcommon.ValueTypeEmpty: - attrs["body.empty"] = value.AsString() - case pcommon.ValueTypeStr: - attrs["body.str"] = value.Str() - case pcommon.ValueTypeBool: - attrs["body.bool"] = value.Bool() - case pcommon.ValueTypeDouble: - attrs["body.double"] = value.Double() - case pcommon.ValueTypeInt: - attrs["body.int"] = value.Int() case pcommon.ValueTypeMap: updateWithPrefixedValues(attrs, "body.map.", ".", value.Map().AsRaw(), 0) - case pcommon.ValueTypeBytes: - attrs["body.bytes"] = value.AsString() - case pcommon.ValueTypeSlice: - attrs["body.slice"] = value.AsRaw() - default: - attrs["body.unknown"] = value.AsString() } return message diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index 91147b494d98..e3d06d43a445 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -61,65 +61,53 @@ func TestBuildBody(t *testing.T) { err = bytes.FromRaw([]byte{byte(65), byte(66), byte(67)}) assert.NoError(t, err) tests := []struct { - body pcommon.Value - key string - value interface{} - message string + body pcommon.Value + valueType string + message string }{ { - body: pcommon.NewValueEmpty(), - key: "body.empty", - value: "", - message: "", + body: pcommon.NewValueEmpty(), + valueType: "empty", + message: "", }, { - body: pcommon.NewValueStr("foo"), - key: "body.str", - value: "foo", - message: "foo", + body: pcommon.NewValueStr("foo"), + valueType: "string", + message: "foo", }, { - body: pcommon.NewValueBool(true), - key: "body.bool", - value: true, - message: "true", + body: pcommon.NewValueBool(true), + valueType: "bool", + message: "true", }, { - body: pcommon.NewValueDouble(42.5), - key: "body.double", - value: float64(42.5), - message: "42.5", + body: pcommon.NewValueDouble(42.5), + valueType: "double", + message: "42.5", }, { - body: pcommon.NewValueInt(42), - key: "body.int", - value: int64(42), - message: "42", + body: 
pcommon.NewValueInt(42), + valueType: "int", + message: "42", }, { - body: bytes, - key: "body.bytes", - value: "QUJD", - message: "QUJD", + body: bytes, + valueType: "bytes", + message: "QUJD", }, { - body: slice, - key: "body.slice", - value: []interface{}{int64(1), int64(2), int64(3)}, - message: "[1,2,3]", + body: slice, + valueType: "simpleMap", + message: "[1,2,3]", }, } for _, tt := range tests { - t.Run(tt.key, func(*testing.T) { + t.Run(tt.valueType, func(*testing.T) { attrs := make(map[string]interface{}) msg := buildBody(attrs, tt.body) - expectedAttrs := make(map[string]interface{}) - expectedAttrs["body.type"] = tt.body.Type().String() - expectedAttrs[tt.key] = tt.value - assert.Equal(t, tt.message, msg, tt.key) - assert.Equal(t, expectedAttrs, attrs, tt.key) + assert.Equal(t, tt.message, msg, tt.valueType) }) } } @@ -138,7 +126,6 @@ func TestBuildBodyMap(t *testing.T) { attrs := make(map[string]interface{}) msg := buildBody(attrs, m) expectedAttrs := make(map[string]interface{}) - expectedAttrs["body.type"] = pcommon.ValueTypeMap.String() expectedAttrs["body.map.scalar"] = "scalar-value" expectedAttrs["body.map.map.m1"] = "v1" expectedAttrs["body.map.map.m2"] = "v2" @@ -161,8 +148,6 @@ var testLEventRaw = &add_events.Event{ Attrs: map[string]interface{}{ "attributes.app": "server", "attributes.instance_num": int64(1), - "body.str": "This is a log message", - "body.type": "Str", "dropped_attributes_count": uint32(1), "message": "This is a log message", "scope.name": "", @@ -179,8 +164,6 @@ var testLEventReq = &add_events.Event{ Attrs: map[string]interface{}{ "attributes.app": "server", "attributes.instance_num": float64(1), - "body.str": "This is a log message", - "body.type": "Str", "dropped_attributes_count": float64(1), "message": "This is a log message", "scope.name": "", From b578baf45d42e45dc01fa6b5aaca0a1d7c47f82c Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 27 Jun 2023 11:46:30 +0200 Subject: [PATCH 26/37] Put functionality to 
decompose message / body field of a complex type (map) behind a config option / feature flag. Decomposing body is not a default behavior of any of the existing DataSet integrations so doing that may cause confusion and additional log volume when that is not desired. If user wishes to achieve that, they can utilize attribute processor, this config option / feature flag or a server side parser for the message field. --- exporter/datasetexporter/config.go | 9 +++++- exporter/datasetexporter/config_test.go | 3 +- exporter/datasetexporter/factory_test.go | 5 ++-- exporter/datasetexporter/logs_exporter.go | 28 +++++++++++-------- .../datasetexporter/logs_exporter_test.go | 9 ++++-- exporter/datasetexporter/testdata/config.yaml | 1 + 6 files changed, 38 insertions(+), 17 deletions(-) diff --git a/exporter/datasetexporter/config.go b/exporter/datasetexporter/config.go index 69a2d532b7e2..e84a87d16894 100644 --- a/exporter/datasetexporter/config.go +++ b/exporter/datasetexporter/config.go @@ -33,18 +33,25 @@ func newDefaultTracesSettings() TracesSettings { } const logsExportResourceInfoDefault = false +const logsDecomposeComplexMessageFieldDefault = false type LogsSettings struct { // ExportResourceInfo is optional flag to signal that the resource info is being exported to DataSet while exporting Logs. // This is especially useful when reducing DataSet billable log volume. // Default value: false. ExportResourceInfo bool `mapstructure:"export_resource_info_on_event"` + // DecomposeComplexMessageField is an optional flag to signal that message / body of complex types (e.g. a map) should be + // decomposed / deconstructed into multiple fields. This is usually done outside of the main DataSet integration on the + // client side (e.g. as part of the attribute processor or similar) or on the server side (DataSet server side JSON parser + // for message field) and that's why this functionality is disabled by default. 
+ DecomposeComplexMessageField bool `mapstructure:"decompose_complex_message_field"` } // newDefaultLogsSettings returns the default settings for LogsSettings. func newDefaultLogsSettings() LogsSettings { return LogsSettings{ - ExportResourceInfo: logsExportResourceInfoDefault, + ExportResourceInfo: logsExportResourceInfoDefault, + DecomposeComplexMessageField: logsDecomposeComplexMessageFieldDefault, } } diff --git a/exporter/datasetexporter/config_test.go b/exporter/datasetexporter/config_test.go index 8bd938926da0..c0cbb84309df 100644 --- a/exporter/datasetexporter/config_test.go +++ b/exporter/datasetexporter/config_test.go @@ -44,6 +44,7 @@ func TestConfigUseDefaults(t *testing.T) { assert.Equal(t, bufferMaxLifetime, config.MaxLifetime) assert.Equal(t, tracesMaxWait, config.TracesSettings.MaxWait) assert.Equal(t, logsExportResourceInfoDefault, config.LogsSettings.ExportResourceInfo) + assert.Equal(t, logsDecomposeComplexMessageFieldDefault, config.LogsSettings.DecomposeComplexMessageField) } func TestConfigValidate(t *testing.T) { @@ -115,7 +116,7 @@ func TestConfigString(t *testing.T) { } assert.Equal(t, - "DatasetURL: https://example.com; BufferSettings: {MaxLifetime:123ns GroupBy:[field1 field2] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s}; TracesSettings: {Aggregate:true MaxWait:45s}; RetrySettings: {Enabled:true InitialInterval:5s RandomizationFactor:0.5 Multiplier:1.5 MaxInterval:30s MaxElapsedTime:5m0s}; QueueSettings: {Enabled:true NumConsumers:10 QueueSize:1000 StorageID:}; TimeoutSettings: {Timeout:5s}; LogsSettings: {ExportResourceInfo:false}", + "DatasetURL: https://example.com; BufferSettings: {MaxLifetime:123ns GroupBy:[field1 field2] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s}; TracesSettings: {Aggregate:true MaxWait:45s}; RetrySettings: {Enabled:true InitialInterval:5s RandomizationFactor:0.5 Multiplier:1.5 MaxInterval:30s MaxElapsedTime:5m0s}; QueueSettings: {Enabled:true NumConsumers:10 
QueueSize:1000 StorageID:}; TimeoutSettings: {Timeout:5s}; LogsSettings: {ExportResourceInfo:false DecomposeComplexMessageField:false}", config.String(), ) } diff --git a/exporter/datasetexporter/factory_test.go b/exporter/datasetexporter/factory_test.go index 6d2df780a697..cb1640ec873e 100644 --- a/exporter/datasetexporter/factory_test.go +++ b/exporter/datasetexporter/factory_test.go @@ -90,7 +90,8 @@ func TestLoadConfig(t *testing.T) { MaxWait: 3 * time.Second, }, LogsSettings: LogsSettings{ - ExportResourceInfo: true, + ExportResourceInfo: true, + DecomposeComplexMessageField: true, }, RetrySettings: exporterhelper.RetrySettings{ Enabled: true, @@ -138,7 +139,7 @@ func createExporterTests() []CreateTest { { name: "broken", config: &Config{}, - expectedError: fmt.Errorf("cannot get DataSetExpoter: cannot convert config: DatasetURL: ; BufferSettings: {MaxLifetime:0s GroupBy:[] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s}; TracesSettings: {Aggregate:false MaxWait:0s}; RetrySettings: {Enabled:false InitialInterval:0s RandomizationFactor:0 Multiplier:0 MaxInterval:0s MaxElapsedTime:0s}; QueueSettings: {Enabled:false NumConsumers:0 QueueSize:0 StorageID:}; TimeoutSettings: {Timeout:0s}; LogsSettings: {ExportResourceInfo:false}; config is not valid: api_key is required"), + expectedError: fmt.Errorf("cannot get DataSetExpoter: cannot convert config: DatasetURL: ; BufferSettings: {MaxLifetime:0s GroupBy:[] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s}; TracesSettings: {Aggregate:false MaxWait:0s}; RetrySettings: {Enabled:false InitialInterval:0s RandomizationFactor:0 Multiplier:0 MaxInterval:0s MaxElapsedTime:0s}; QueueSettings: {Enabled:false NumConsumers:0 QueueSize:0 StorageID:}; TimeoutSettings: {Timeout:0s}; LogsSettings: {ExportResourceInfo:false DecomposeComplexMessageField:false}; config is not valid: api_key is required"), }, { name: "valid", diff --git a/exporter/datasetexporter/logs_exporter.go 
b/exporter/datasetexporter/logs_exporter.go index 343e659bc194..36bb908051b2 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -54,18 +54,24 @@ func createLogsExporter(ctx context.Context, set exporter.CreateSettings, config ) } -func buildBody(attrs map[string]interface{}, value pcommon.Value) string { +func buildBody(settings LogsSettings, attrs map[string]interface{}, value pcommon.Value) string { + // The message / body is stored as part of the "message" field on the DataSet event. message := value.AsString() - // We already store value as part of a message field so there is no need to store it twice - // and also store the type which is dataset anti-pattern right now. We only handle it - // specially for map type where we decompose map into multiple attributes. Even though this - // may also not be desired in the future since other integrations don't handle it that way, - // but leave it up to the user to handle that (e.g. as part of a server side parser or otel - // collector attribute processor or similar). - switch value.Type() { - case pcommon.ValueTypeMap: - updateWithPrefixedValues(attrs, "body.map.", ".", value.Map().AsRaw(), 0) + // Additionally, we support de-composing complex message value (e.g. map / dictionary) into + // multiple event attributes. + // + // This functionality is behind a config option / feature flag and not enabled by default + // since no other existing DataSet integrations handle it in this manner (aka for out of + // the box consistency reasons). + // If user wants to achieve something like that, they usually handle that on the client + // (e.g. attribute processor or similar) or on the server (DataSet server side JSON parser + // for the message field). 
+ if settings.DecomposeComplexMessageField { + switch value.Type() { + case pcommon.ValueTypeMap: + updateWithPrefixedValues(attrs, "body.map.", ".", value.Map().AsRaw(), 0) + } } return message @@ -178,7 +184,7 @@ func buildEventFromLog( } if body := log.Body().AsString(); body != "" { - attrs["message"] = buildBody(attrs, log.Body()) + attrs["message"] = buildBody(settings, attrs, log.Body()) } if dropped := log.DroppedAttributesCount(); dropped > 0 { attrs["dropped_attributes_count"] = dropped diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index e3d06d43a445..f584f43cf61c 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -102,10 +102,12 @@ func TestBuildBody(t *testing.T) { }, } + settings := newDefaultLogsSettings() + for _, tt := range tests { t.Run(tt.valueType, func(*testing.T) { attrs := make(map[string]interface{}) - msg := buildBody(attrs, tt.body) + msg := buildBody(settings, attrs, tt.body) assert.Equal(t, tt.message, msg, tt.valueType) }) @@ -123,8 +125,11 @@ func TestBuildBodyMap(t *testing.T) { "array": []any{1, 2, 3}, }) if assert.NoError(t, err) { + settings := newDefaultLogsSettings() + settings.DecomposeComplexMessageField = true attrs := make(map[string]interface{}) - msg := buildBody(attrs, m) + + msg := buildBody(settings, attrs, m) expectedAttrs := make(map[string]interface{}) expectedAttrs["body.map.scalar"] = "scalar-value" expectedAttrs["body.map.map.m1"] = "v1" diff --git a/exporter/datasetexporter/testdata/config.yaml b/exporter/datasetexporter/testdata/config.yaml index 61360b810919..b42b7aa12258 100644 --- a/exporter/datasetexporter/testdata/config.yaml +++ b/exporter/datasetexporter/testdata/config.yaml @@ -27,6 +27,7 @@ dataset/full: max_wait: 3s logs: export_resource_info_on_event: true + decompose_complex_message_field: true retry_on_failure: enabled: true initial_interval: 11 From 
ec0f58b4b05f2d52829c6b6eb29db707fbe055f6 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 27 Jun 2023 11:49:22 +0200 Subject: [PATCH 27/37] Fix lint. --- exporter/datasetexporter/logs_exporter.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index 36bb908051b2..cbd67614d6be 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -67,11 +67,8 @@ func buildBody(settings LogsSettings, attrs map[string]interface{}, value pcommo // If user wants to achieve something like that, they usually handle that on the client // (e.g. attribute processor or similar) or on the server (DataSet server side JSON parser // for the message field). - if settings.DecomposeComplexMessageField { - switch value.Type() { - case pcommon.ValueTypeMap: - updateWithPrefixedValues(attrs, "body.map.", ".", value.Map().AsRaw(), 0) - } + if settings.DecomposeComplexMessageField && value.Type() == pcommon.ValueTypeMap { + updateWithPrefixedValues(attrs, "body.map.", ".", value.Map().AsRaw(), 0) } return message From d284d14d663d87c52d281c723e90ecccc2299de0 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 27 Jun 2023 11:50:44 +0200 Subject: [PATCH 28/37] Add missing code comment. 
--- exporter/datasetexporter/logs_exporter.go | 1 + 1 file changed, 1 insertion(+) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index cbd67614d6be..2819fb546aed 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -18,6 +18,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" ) +// We define it here so we can easily mock it inside tests var now = time.Now // If a LogRecord doesn't contain severity or we can't map it to a valid DataSet severity, we use From 0bbaa50d38ccdf8469b2bde97ccb5e30cd33c98a Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 27 Jun 2023 12:14:50 +0200 Subject: [PATCH 29/37] Use a constant. --- exporter/datasetexporter/logs_exporter.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index 2819fb546aed..dad99635340b 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -21,6 +21,9 @@ import ( // We define it here so we can easily mock it inside tests var now = time.Now +// Prefix which is added to all the special / internal DataSet fields +const specialDataSetFieldNamePrefix string = "sca:" + // If a LogRecord doesn't contain severity or we can't map it to a valid DataSet severity, we use // this value (3 - INFO) instead const defaultDataSetSeverityLevel int = dataSetLogLevelInfo @@ -184,12 +187,15 @@ func buildEventFromLog( if body := log.Body().AsString(); body != "" { attrs["message"] = buildBody(settings, attrs, log.Body()) } + if dropped := log.DroppedAttributesCount(); dropped > 0 { attrs["dropped_attributes_count"] = dropped } + if !observedTs.Equal(time.Unix(0, 0)) { - attrs["sca:observedTimestamp"] = strconv.FormatInt(observedTs.UnixNano(), 10) + attrs[specialDataSetFieldNamePrefix + "observedTimestamp"] = strconv.FormatInt(observedTs.UnixNano(), 10) } + if span 
:= log.SpanID().String(); span != "" { attrs["span_id"] = span } From 64a5d886ed37f418ff44733333ae63e09a365331 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 27 Jun 2023 15:04:25 +0200 Subject: [PATCH 30/37] Allow user to control if scope info and attributes are exported with each event (when available). This can be done via "export_scope_info_on_event" config option / feature flag. This is similar to the "export_resource_info" change we added a while ago. Scope info may not always be used by the end user so ingesting it with each event may just result in increased billable log volume for the end user without providing any value. Since the value of scope attributes depends on the implementation and it's usually much less noisy / verbose than resource information, the default value will stay "true" for now. If in the future it turns out that "false" makes more sense for a default value, we can change it then. --- exporter/datasetexporter/config.go | 7 +++++++ exporter/datasetexporter/config_test.go | 18 +++++++++++++++++- exporter/datasetexporter/factory_test.go | 5 ++++- exporter/datasetexporter/logs_exporter.go | 11 ++++++++--- exporter/datasetexporter/logs_exporter_test.go | 2 ++ exporter/datasetexporter/testdata/config.yaml | 1 + 6 files changed, 39 insertions(+), 5 deletions(-) diff --git a/exporter/datasetexporter/config.go b/exporter/datasetexporter/config.go index e84a87d16894..fed06cc26b0c 100644 --- a/exporter/datasetexporter/config.go +++ b/exporter/datasetexporter/config.go @@ -33,6 +33,7 @@ func newDefaultTracesSettings() TracesSettings { } const logsExportResourceInfoDefault = false +const logsExportScopeInfoDefault = true const logsDecomposeComplexMessageFieldDefault = false type LogsSettings struct { @@ -40,6 +41,11 @@ type LogsSettings struct { // This is especially useful when reducing DataSet billable log volume. // Default value: false. 
ExportResourceInfo bool `mapstructure:"export_resource_info_on_event"` + + // ExportScopeInfo is optional flag that signals if scope info should be exported (when available) with each event. If scope + // information is not utilized, it makes sense to disable exporting it since it will result in increased billable log volume. + ExportScopeInfo bool `mapstructure:"export_scope_info_on_event"` + // DecomposeComplexMessageField is an optional flag to signal that message / body of complex types (e.g. a map) should be // decomposed / deconstructed into multiple fields. This is usually done outside of the main DataSet integration on the // client side (e.g. as part of the attribute processor or similar) or on the server side (DataSet server side JSON parser @@ -51,6 +57,7 @@ type LogsSettings struct { func newDefaultLogsSettings() LogsSettings { return LogsSettings{ ExportResourceInfo: logsExportResourceInfoDefault, + ExportScopeInfo: logsExportScopeInfoDefault, DecomposeComplexMessageField: logsDecomposeComplexMessageFieldDefault, } } diff --git a/exporter/datasetexporter/config_test.go b/exporter/datasetexporter/config_test.go index c0cbb84309df..0f7b502bb109 100644 --- a/exporter/datasetexporter/config_test.go +++ b/exporter/datasetexporter/config_test.go @@ -44,6 +44,7 @@ func TestConfigUseDefaults(t *testing.T) { assert.Equal(t, bufferMaxLifetime, config.MaxLifetime) assert.Equal(t, tracesMaxWait, config.TracesSettings.MaxWait) assert.Equal(t, logsExportResourceInfoDefault, config.LogsSettings.ExportResourceInfo) + assert.Equal(t, logsExportScopeInfoDefault, config.LogsSettings.ExportScopeInfo) assert.Equal(t, logsDecomposeComplexMessageFieldDefault, config.LogsSettings.DecomposeComplexMessageField) } @@ -116,7 +117,7 @@ func TestConfigString(t *testing.T) { } assert.Equal(t, - "DatasetURL: https://example.com; BufferSettings: {MaxLifetime:123ns GroupBy:[field1 field2] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s}; TracesSettings: 
{Aggregate:true MaxWait:45s}; RetrySettings: {Enabled:true InitialInterval:5s RandomizationFactor:0.5 Multiplier:1.5 MaxInterval:30s MaxElapsedTime:5m0s}; QueueSettings: {Enabled:true NumConsumers:10 QueueSize:1000 StorageID:}; TimeoutSettings: {Timeout:5s}; LogsSettings: {ExportResourceInfo:false DecomposeComplexMessageField:false}", + "DatasetURL: https://example.com; BufferSettings: {MaxLifetime:123ns GroupBy:[field1 field2] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s}; TracesSettings: {Aggregate:true MaxWait:45s}; RetrySettings: {Enabled:true InitialInterval:5s RandomizationFactor:0.5 Multiplier:1.5 MaxInterval:30s MaxElapsedTime:5m0s}; QueueSettings: {Enabled:true NumConsumers:10 QueueSize:1000 StorageID:}; TimeoutSettings: {Timeout:5s}; LogsSettings: {ExportResourceInfo:false ExportScopeInfo:false DecomposeComplexMessageField:false}", config.String(), ) } @@ -135,3 +136,18 @@ func TestConfigUseProvidedExportResourceInfoValue(t *testing.T) { assert.Nil(t, err) assert.Equal(t, true, config.LogsSettings.ExportResourceInfo) } + +func TestConfigUseProvidedExportScopeInfoValue(t *testing.T) { + f := NewFactory() + config := f.CreateDefaultConfig().(*Config) + configMap := confmap.NewFromStringMap(map[string]interface{}{ + "dataset_url": "https://example.com", + "api_key": "secret", + "logs": map[string]any{ + "export_scope_info_on_event": false, + }, + }) + err := config.Unmarshal(configMap) + assert.Nil(t, err) + assert.Equal(t, false, config.LogsSettings.ExportScopeInfo) +} diff --git a/exporter/datasetexporter/factory_test.go b/exporter/datasetexporter/factory_test.go index cb1640ec873e..1fbae40f3177 100644 --- a/exporter/datasetexporter/factory_test.go +++ b/exporter/datasetexporter/factory_test.go @@ -26,6 +26,7 @@ func TestCreateDefaultConfig(t *testing.T) { assert.Equal(t, &Config{ BufferSettings: newDefaultBufferSettings(), TracesSettings: newDefaultTracesSettings(), + LogsSettings: newDefaultLogsSettings(), RetrySettings: 
exporterhelper.NewDefaultRetrySettings(), QueueSettings: exporterhelper.NewDefaultQueueSettings(), TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), @@ -91,6 +92,7 @@ func TestLoadConfig(t *testing.T) { }, LogsSettings: LogsSettings{ ExportResourceInfo: true, + ExportScopeInfo: true, DecomposeComplexMessageField: true, }, RetrySettings: exporterhelper.RetrySettings{ @@ -139,7 +141,7 @@ func createExporterTests() []CreateTest { { name: "broken", config: &Config{}, - expectedError: fmt.Errorf("cannot get DataSetExpoter: cannot convert config: DatasetURL: ; BufferSettings: {MaxLifetime:0s GroupBy:[] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s}; TracesSettings: {Aggregate:false MaxWait:0s}; RetrySettings: {Enabled:false InitialInterval:0s RandomizationFactor:0 Multiplier:0 MaxInterval:0s MaxElapsedTime:0s}; QueueSettings: {Enabled:false NumConsumers:0 QueueSize:0 StorageID:}; TimeoutSettings: {Timeout:0s}; LogsSettings: {ExportResourceInfo:false DecomposeComplexMessageField:false}; config is not valid: api_key is required"), + expectedError: fmt.Errorf("cannot get DataSetExpoter: cannot convert config: DatasetURL: ; BufferSettings: {MaxLifetime:0s GroupBy:[] RetryInitialInterval:0s RetryMaxInterval:0s RetryMaxElapsedTime:0s}; TracesSettings: {Aggregate:false MaxWait:0s}; RetrySettings: {Enabled:false InitialInterval:0s RandomizationFactor:0 Multiplier:0 MaxInterval:0s MaxElapsedTime:0s}; QueueSettings: {Enabled:false NumConsumers:0 QueueSize:0 StorageID:}; TimeoutSettings: {Timeout:0s}; LogsSettings: {ExportResourceInfo:false ExportScopeInfo:false DecomposeComplexMessageField:false}; config is not valid: api_key is required"), }, { name: "valid", @@ -153,6 +155,7 @@ func createExporterTests() []CreateTest { RetryMaxInterval: time.Minute, RetryMaxElapsedTime: time.Hour, }, + LogsSettings: newDefaultLogsSettings(), TracesSettings: TracesSettings{ Aggregate: true, MaxWait: 5 * time.Second, diff --git 
a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index dad99635340b..7e7d01861f46 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -193,7 +193,7 @@ func buildEventFromLog( } if !observedTs.Equal(time.Unix(0, 0)) { - attrs[specialDataSetFieldNamePrefix + "observedTimestamp"] = strconv.FormatInt(observedTs.UnixNano(), 10) + attrs[specialDataSetFieldNamePrefix+"observedTimestamp"] = strconv.FormatInt(observedTs.UnixNano(), 10) } if span := log.SpanID().String(); span != "" { @@ -223,8 +223,13 @@ func buildEventFromLog( if settings.ExportResourceInfo { updateWithPrefixedValues(attrs, "resource.attributes.", ".", resource.Attributes().AsRaw(), 0) } - attrs["scope.name"] = scope.Name() - updateWithPrefixedValues(attrs, "scope.attributes.", ".", scope.Attributes().AsRaw(), 0) + + fmt.Printf("a, %v", settings.ExportScopeInfo) + + if settings.ExportScopeInfo { + attrs["scope.name"] = scope.Name() + updateWithPrefixedValues(attrs, "scope.attributes.", ".", scope.Attributes().AsRaw(), 0) + } event.Attrs = attrs event.Log = "LL" diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index f584f43cf61c..5fb79b70d29d 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -231,6 +231,7 @@ func TestBuildEventFromLogExportResources(t *testing.T) { lr.ResourceLogs().At(0).ScopeLogs().At(0).Scope(), LogsSettings{ ExportResourceInfo: true, + ExportScopeInfo: true, }, ) @@ -352,6 +353,7 @@ func TestConsumeLogsShouldSucceed(t *testing.T) { RetryMaxInterval: time.Minute, RetryMaxElapsedTime: time.Hour, }, + LogsSettings: newDefaultLogsSettings(), RetrySettings: exporterhelper.NewDefaultRetrySettings(), QueueSettings: exporterhelper.NewDefaultQueueSettings(), TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), diff --git a/exporter/datasetexporter/testdata/config.yaml 
b/exporter/datasetexporter/testdata/config.yaml index b42b7aa12258..76aae4b1dc54 100644 --- a/exporter/datasetexporter/testdata/config.yaml +++ b/exporter/datasetexporter/testdata/config.yaml @@ -27,6 +27,7 @@ dataset/full: max_wait: 3s logs: export_resource_info_on_event: true + export_scope_info_on_event: true decompose_complex_message_field: true retry_on_failure: enabled: true From 6ca746d8abeac83f410d7799ab88d78428acafbe Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 27 Jun 2023 15:09:21 +0200 Subject: [PATCH 31/37] Add changelog file. --- ...ter-timestamp-body-scope-improvements.yaml | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 .chloggen/dataset-exporter-timestamp-body-scope-improvements.yaml diff --git a/.chloggen/dataset-exporter-timestamp-body-scope-improvements.yaml b/.chloggen/dataset-exporter-timestamp-body-scope-improvements.yaml new file mode 100644 index 000000000000..275264c3735c --- /dev/null +++ b/.chloggen/dataset-exporter-timestamp-body-scope-improvements.yaml @@ -0,0 +1,20 @@ +# Use this changelog template to create an entry for release notes. +# If your change doesn't affect end users, such as a test fix or a tooling change, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: exporter/datasetexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). 
+note: "Rename 'observed_timestamp' field on the DataSet event to 'sca:observedTimestamp' and ensure the value is nanoseconds since epoch, update serializing and handling of body / message field to ensure it's consistent with other DataSet integrations and allow user to disable exporting scope information with each event by setting 'export_scope_info_on_event' logs config option to false." + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [20660] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: From 4c2c1bd8d5ae845967fd23176f86ef69120fbe47 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 27 Jun 2023 15:26:16 +0200 Subject: [PATCH 32/37] Remove debugging change, make sure we don't set scope.name attribute if it's empty and add test case for it. 
--- exporter/datasetexporter/logs_exporter.go | 6 +-- .../datasetexporter/logs_exporter_test.go | 49 ++++++++++++++++++- 2 files changed, 50 insertions(+), 5 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index 7e7d01861f46..67e11a9b93b7 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -224,10 +224,10 @@ func buildEventFromLog( updateWithPrefixedValues(attrs, "resource.attributes.", ".", resource.Attributes().AsRaw(), 0) } - fmt.Printf("a, %v", settings.ExportScopeInfo) - if settings.ExportScopeInfo { - attrs["scope.name"] = scope.Name() + if scope.Name() != "" { + attrs["scope.name"] = scope.Name() + } updateWithPrefixedValues(attrs, "scope.attributes.", ".", scope.Attributes().AsRaw(), 0) } diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index 5fb79b70d29d..25754c1e316f 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -155,7 +155,22 @@ var testLEventRaw = &add_events.Event{ "attributes.instance_num": int64(1), "dropped_attributes_count": uint32(1), "message": "This is a log message", - "scope.name": "", + "span_id": "0102040800000000", + "trace_id": "08040201000000000000000000000000", + }, +} + +var testLEventRawWithScopeInfo = &add_events.Event{ + Thread: "TL", + Log: "LL", + Sev: 3, + Ts: "1581452773000000789", + Attrs: map[string]interface{}{ + "attributes.app": "server", + "attributes.instance_num": int64(1), + "dropped_attributes_count": uint32(1), + "scope.name": "test-scope", + "message": "This is a log message", "span_id": "0102040800000000", "trace_id": "08040201000000000000000000000000", }, @@ -171,7 +186,6 @@ var testLEventReq = &add_events.Event{ "attributes.instance_num": float64(1), "dropped_attributes_count": float64(1), "message": "This is a log message", - "scope.name": "", "span_id": 
"0102040800000000", "trace_id": "08040201000000000000000000000000", "bundle_key": "d41d8cd98f00b204e9800998ecf8427e", @@ -238,6 +252,37 @@ func TestBuildEventFromLogExportResources(t *testing.T) { assert.Equal(t, expected, was) } +func TestBuildEventFromLogExportScopeInfo(t *testing.T) { + lr := testdata.GenerateLogsOneLogRecord() + ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + + scope := pcommon.NewInstrumentationScope() + scope.SetName("test-scope") + scope.SetDroppedAttributesCount(11) + + expected := &add_events.EventBundle{ + Event: &add_events.Event{ + Thread: testLEventRawWithScopeInfo.Thread, + Log: testLEventRawWithScopeInfo.Log, + Sev: testLEventRawWithScopeInfo.Sev, + Ts: testLEventRawWithScopeInfo.Ts, + Attrs: testLEventRawWithScopeInfo.Attrs, + }, + Thread: testLThread, + Log: testLLog, + } + was := buildEventFromLog( + ld, + lr.ResourceLogs().At(0).Resource(), + scope, + LogsSettings{ + ExportResourceInfo: false, + ExportScopeInfo: true, + }, + ) + + assert.Equal(t, expected, was) +} func TestBuildEventFromLogEventWithoutTimestampWithObservedTimestampUseObservedTimestamp(t *testing.T) { // When LogRecord doesn't have timestamp set, but it has ObservedTimestamp set, // ObservedTimestamp should be used From 893e44a9977f8facf9e300818d7e3ca59919bfcc Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 28 Jun 2023 10:10:15 +0200 Subject: [PATCH 33/37] For consistency name the field "sca:observedTime" instead (we already have a field named "sca:ingestTime" added on the server side). 
--- exporter/datasetexporter/logs_exporter.go | 2 +- exporter/datasetexporter/logs_exporter_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index 67e11a9b93b7..02eadaa6dec9 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -193,7 +193,7 @@ func buildEventFromLog( } if !observedTs.Equal(time.Unix(0, 0)) { - attrs[specialDataSetFieldNamePrefix+"observedTimestamp"] = strconv.FormatInt(observedTs.UnixNano(), 10) + attrs[specialDataSetFieldNamePrefix+"observedTime"] = strconv.FormatInt(observedTs.UnixNano(), 10) } if span := log.SpanID().String(); span != "" { diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go index 25754c1e316f..9eb30b030850 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -294,7 +294,7 @@ func TestBuildEventFromLogEventWithoutTimestampWithObservedTimestampUseObservedT testLEventRaw.Ts = "1686235113000000000" // 2023-06-08 14:38:33 +0000 UTC - testLEventRaw.Attrs["sca:observedTimestamp"] = "1686235113000000000" + testLEventRaw.Attrs["sca:observedTime"] = "1686235113000000000" delete(testLEventRaw.Attrs, "timestamp") delete(testLEventRaw.Attrs, "resource.attributes.resource-attr") @@ -331,7 +331,7 @@ func TestBuildEventFromLogEventWithoutTimestampWithOutObservedTimestampUseCurren testLEventRaw.Ts = strconv.FormatInt(currentTime.UnixNano(), 10) delete(testLEventRaw.Attrs, "timestamp") - delete(testLEventRaw.Attrs, "sca:observedTimestamp") + delete(testLEventRaw.Attrs, "sca:observedTime") delete(testLEventRaw.Attrs, "resource.attributes.resource-attr") expected := &add_events.EventBundle{ From 539dd0a15403017c2d55f17d84058102090a34c6 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 29 Jun 2023 11:28:04 +0200 Subject: [PATCH 34/37] Add missing PR references 
to changelog entries for the dataset exporter. Also add missing "dataset-exporter" suffix to one of the files to prevent possible conflicts in the future. --- ...exporter-export-logs-resource-info-based-configuration.yaml} | 2 +- .../dataset-exporter-timestamp-body-scope-improvements.yaml | 2 +- .chloggen/dataset-exporter-various-improvements-and-fixes.yaml | 2 +- .chloggen/dataset-more-detailed-user-agent.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) rename .chloggen/{export-logs-resource-info-based-configuration.yaml => dataset-exporter-export-logs-resource-info-based-configuration.yaml} (97%) diff --git a/.chloggen/export-logs-resource-info-based-configuration.yaml b/.chloggen/dataset-exporter-export-logs-resource-info-based-configuration.yaml similarity index 97% rename from .chloggen/export-logs-resource-info-based-configuration.yaml rename to .chloggen/dataset-exporter-export-logs-resource-info-based-configuration.yaml index 8e4872151206..b5937c6e5f51 100644 --- a/.chloggen/export-logs-resource-info-based-configuration.yaml +++ b/.chloggen/dataset-exporter-export-logs-resource-info-based-configuration.yaml @@ -12,7 +12,7 @@ component: datasetexporter note: "Allow include Logs resource info export to DataSet based on new export_resource_info_on_event configuration. Fix timestamp handling." # Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [20660] +issues: [20660, 23250] # (Optional) One or more lines of additional information to render under the primary note. # These lines will be padded with 2 spaces and then inserted directly into the document. 
diff --git a/.chloggen/dataset-exporter-timestamp-body-scope-improvements.yaml b/.chloggen/dataset-exporter-timestamp-body-scope-improvements.yaml index 275264c3735c..aa45dd38afea 100644 --- a/.chloggen/dataset-exporter-timestamp-body-scope-improvements.yaml +++ b/.chloggen/dataset-exporter-timestamp-body-scope-improvements.yaml @@ -12,7 +12,7 @@ component: exporter/datasetexporter note: "Rename 'observed_timestamp' field on the DataSet event to 'sca:observedTimestamp' and ensure the value is nanoseconds since epoch, update serializing and handling of body / message field to ensure it's consistent with other DataSet integrations and allow user to disable exporting scope information with each event by setting 'export_scope_info_on_event' logs config option to false." # Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [20660] +issues: [20660, 23826] # (Optional) One or more lines of additional information to render under the primary note. # These lines will be padded with 2 spaces and then inserted directly into the document. diff --git a/.chloggen/dataset-exporter-various-improvements-and-fixes.yaml b/.chloggen/dataset-exporter-various-improvements-and-fixes.yaml index 62207ac59d7d..e87183ed4ed7 100644 --- a/.chloggen/dataset-exporter-various-improvements-and-fixes.yaml +++ b/.chloggen/dataset-exporter-various-improvements-and-fixes.yaml @@ -12,7 +12,7 @@ component: exporter/datasetexporter note: "Correctly map LogRecord severity to DataSet severity, remove redundant DataSet event message field prefix (OtelExporter - Log -) and remove redundant DataSet event fields (flags, flags.is_sampled)." # Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [20660] +issues: [20660, 23672] # (Optional) One or more lines of additional information to render under the primary note. 
# These lines will be padded with 2 spaces and then inserted directly into the document. diff --git a/.chloggen/dataset-more-detailed-user-agent.yaml b/.chloggen/dataset-more-detailed-user-agent.yaml index 5b7c265a7ba2..1e5b10dbfeb9 100644 --- a/.chloggen/dataset-more-detailed-user-agent.yaml +++ b/.chloggen/dataset-more-detailed-user-agent.yaml @@ -12,7 +12,7 @@ component: exporter/datasetexporter note: "Add more details to User-Agent header for DataSet HTTP requests" # Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [20660] +issues: [20660, 23640] # (Optional) One or more lines of additional information to render under the primary note. # These lines will be padded with 2 spaces and then inserted directly into the document. From c7e20389c1d8ea85ba8f8bdfbbdcd564a8f7a4e5 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 7 Jul 2023 10:41:29 +0200 Subject: [PATCH 35/37] Update exporter readme with new config options. --- exporter/datasetexporter/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/exporter/datasetexporter/README.md b/exporter/datasetexporter/README.md index dcf867a07713..c60a7b003078 100644 --- a/exporter/datasetexporter/README.md +++ b/exporter/datasetexporter/README.md @@ -36,11 +36,12 @@ If you do not want to specify `api_key` in the file, you can use the [builtin fu - `max_wait` (default = 5s): The maximum waiting for all spans from single trace to arrive; ignored if `aggregate` is false. - `logs`: - `export_resource_info_on_event` (default = false): Include resource info to DataSet Event while exporting Logs. This is especially useful when reducing DataSet billable log volume. + - `export_scope_info_on_event` (default = false): Include LogRecord scope information (if available) on the DataSet event. + - `decompose_complex_message_field` (default = true): Set this to false to disable decompossing complex body / message field types (e.g. 
a map) into separate fields. - `retry_on_failure`: See [retry_on_failure](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) - `sending_queue`: See [sending_queue](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) - `timeout`: See [timeout](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) - ### Example ```yaml From b0dc06f54b06c3cf7640c91b75eda090dc65f2d1 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus <126863902+tomaz-s1@users.noreply.github.com> Date: Wed, 19 Jul 2023 18:15:25 +0200 Subject: [PATCH 36/37] Update exporter/datasetexporter/README.md Co-authored-by: Pablo Baeyens --- exporter/datasetexporter/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exporter/datasetexporter/README.md b/exporter/datasetexporter/README.md index f181ac696a32..90389ac29f5d 100644 --- a/exporter/datasetexporter/README.md +++ b/exporter/datasetexporter/README.md @@ -35,7 +35,7 @@ If you do not want to specify `api_key` in the file, you can use the [builtin fu - `logs`: - `export_resource_info_on_event` (default = false): Include resource info to DataSet Event while exporting Logs. This is especially useful when reducing DataSet billable log volume. - `export_scope_info_on_event` (default = false): Include LogRecord scope information (if available) on the DataSet event. - - `decompose_complex_message_field` (default = true): Set this to false to disable decompossing complex body / message field types (e.g. a map) into separate fields. + - `decompose_complex_message_field` (default = true): Set this to false to disable decomposing complex body / message field types (e.g. a map) into separate fields. 
- `retry_on_failure`: See [retry_on_failure](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) - `sending_queue`: See [sending_queue](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) - `timeout`: See [timeout](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) From 8df03dc80f037f4951b04f6759849c3eaf3c6501 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus <126863902+tomaz-s1@users.noreply.github.com> Date: Thu, 20 Jul 2023 12:44:48 +0200 Subject: [PATCH 37/37] Update exporter/datasetexporter/config.go Co-authored-by: Pablo Baeyens --- exporter/datasetexporter/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exporter/datasetexporter/config.go b/exporter/datasetexporter/config.go index 3e39f633794f..3196c3d9ebf3 100644 --- a/exporter/datasetexporter/config.go +++ b/exporter/datasetexporter/config.go @@ -34,7 +34,7 @@ type LogsSettings struct { // Default value: false. ExportResourceInfo bool `mapstructure:"export_resource_info_on_event"` - // ExportScopeInfo is optional flag that signals if scope info should be exported (when available) with each event. If scope + // ExportScopeInfo is an optional flag that signals if scope info should be exported (when available) with each event. If scope // information is not utilized, it makes sense to disable exporting it since it will result in increased billable log volume. ExportScopeInfo bool `mapstructure:"export_scope_info_on_event"`