[processor/tailsampling] use generated status header (#22757)
This PR updates the component to generate the status table using mdatagen.

Linked issue: #21213

Signed-off-by: Alex Boten <[email protected]>
Alex Boten authored May 24, 2023
1 parent 0801d00 commit 88887da
Showing 7 changed files with 52 additions and 29 deletions.
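
For reference, the `internal/metadata` package imported throughout the diffs below is emitted by mdatagen from the new metadata.yaml rather than written by hand. A minimal sketch of what that generated package likely contains is shown here; the file name and exact layout are assumptions, and only the identifiers `metadata.Type` and `metadata.TracesStability` are confirmed by the diff itself.

```go
// Hypothetical sketch of processor/tailsamplingprocessor/internal/metadata/generated_status.go
// (not part of this commit's rendered files; file name and layout are assumed).
package metadata

import (
	"go.opentelemetry.io/collector/component"
)

const (
	// Type mirrors `type: tail_sampling` in metadata.yaml and replaces the old typeStr constant.
	Type = "tail_sampling"
	// TracesStability mirrors `stability: beta: [traces]` and replaces the old stability constant.
	TracesStability = component.StabilityLevelBeta
)
```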
22 changes: 12 additions & 10 deletions processor/tailsamplingprocessor/README.md
@@ -1,10 +1,17 @@
# Tail Sampling Processor

| Status | |
| ------------------------ |-----------------------------------------|
| Stability | [beta] |
| Supported pipeline types | traces |
| Distributions | [contrib], [observiq], [splunk], [sumo] |
<!-- status autogenerated section -->
| Status | |
| ------------- |-----------|
| Stability | [beta]: traces |
| Distributions | [contrib], [observiq], [splunk], [sumo] |

[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta
[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
[observiq]: https://github.com/observIQ/observiq-otel-collector
[splunk]: https://github.com/signalfx/splunk-otel-collector
[sumo]: https://github.com/SumoLogic/sumologic-otel-collector
<!-- end autogenerated section -->

The tail sampling processor samples traces based on a set of defined policies. All spans for a given trace MUST be received by the same collector instance for effective sampling decisions.

@@ -207,10 +214,5 @@ As a rule of thumb, if you want to add probabilistic sampling and...

...you are already using the tail sampling processor: add the probabilistic sampling policy. You are already incurring the cost of running the tail sampling processor, so adding the probabilistic policy will be negligible. Additionally, using the policy within the tail sampling processor will ensure traces that are sampled by other policies will not be dropped.

[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta
[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
[probabilistic_sampling_processor]: ../probabilisticsamplerprocessor
[loadbalancing_exporter]: ../../exporter/loadbalancingexporter
[splunk]: https://github.com/signalfx/splunk-otel-collector
[observiq]: https://github.com/observIQ/observiq-otel-collector
[sumo]: https://github.com/SumoLogic/sumologic-otel-collector
3 changes: 2 additions & 1 deletion processor/tailsamplingprocessor/config_test.go
@@ -14,6 +14,7 @@ import (
"go.opentelemetry.io/collector/confmap/confmaptest"

"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/metadata"
)

func TestLoadConfig(t *testing.T) {
@@ -25,7 +26,7 @@ func TestLoadConfig(t *testing.T) {
factory := NewFactory()
cfg := factory.CreateDefaultConfig()

sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String())
sub, err := cm.Sub(component.NewIDWithName(metadata.Type, "").String())
require.NoError(t, err)
require.NoError(t, component.UnmarshalConfig(sub, cfg))

13 changes: 5 additions & 8 deletions processor/tailsamplingprocessor/factory.go
@@ -1,6 +1,8 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

//go:generate mdatagen metadata.yaml

package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor"

import (
@@ -13,13 +15,8 @@ import (
"go.opentelemetry.io/collector/config/configtelemetry"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/processor"
)

const (
// The value of "type" Tail Sampling in configuration.
typeStr = "tail_sampling"
// The stability level of the processor.
stability = component.StabilityLevelBeta
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/metadata"
)

var onceMetrics sync.Once
@@ -32,9 +29,9 @@ func NewFactory() processor.Factory {
})

return processor.NewFactory(
typeStr,
metadata.Type,
createDefaultConfig,
processor.WithTraces(createTracesProcessor, stability))
processor.WithTraces(createTracesProcessor, metadata.TracesStability))
}

func createDefaultConfig() component.Config {
4 changes: 3 additions & 1 deletion processor/tailsamplingprocessor/factory_test.go
@@ -15,6 +15,8 @@ import (
"go.opentelemetry.io/collector/confmap/confmaptest"
"go.opentelemetry.io/collector/consumer/consumertest"
"go.opentelemetry.io/collector/processor/processortest"

"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/metadata"
)

func TestCreateDefaultConfig(t *testing.T) {
@@ -30,7 +32,7 @@ func TestCreateProcessor(t *testing.T) {
factory := NewFactory()
cfg := factory.CreateDefaultConfig()

sub, err := cm.Sub(component.NewIDWithName(typeStr, "").String())
sub, err := cm.Sub(component.NewIDWithName(metadata.Type, "").String())
require.NoError(t, err)
require.NoError(t, component.UnmarshalConfig(sub, cfg))


Some generated files are not rendered by default.

7 changes: 7 additions & 0 deletions processor/tailsamplingprocessor/metadata.yaml
@@ -0,0 +1,7 @@
type: tail_sampling

status:
class: processor
stability:
beta: [traces]
distributions: [contrib, observiq, splunk, sumo]
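
This metadata.yaml acts as the single source of truth: mdatagen derives both the `<!-- status autogenerated section -->` block in the README and the `metadata.Type` / `metadata.TracesStability` constants sketched above from it, so stability and distribution information only needs to be updated in one place.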
20 changes: 11 additions & 9 deletions processor/tailsamplingprocessor/metrics.go
@@ -9,6 +9,8 @@ import (
"go.opencensus.io/tag"
"go.opentelemetry.io/collector/config/configtelemetry"
"go.opentelemetry.io/collector/obsreport"

"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/metadata"
)

// Variables related to metrics specific to tail sampling.
@@ -44,62 +46,62 @@ func SamplingProcessorMetricViews(level configtelemetry.Level) []*view.View {
ageDistributionAggregation := view.Distribution(1, 2, 5, 10, 20, 30, 40, 50, 60, 90, 120, 180, 300, 600, 1800, 3600, 7200)

decisionLatencyView := &view.View{
Name: obsreport.BuildProcessorCustomMetricName(typeStr, statDecisionLatencyMicroSec.Name()),
Name: obsreport.BuildProcessorCustomMetricName(metadata.Type, statDecisionLatencyMicroSec.Name()),
Measure: statDecisionLatencyMicroSec,
Description: statDecisionLatencyMicroSec.Description(),
TagKeys: policyTagKeys,
Aggregation: latencyDistributionAggregation,
}
overallDecisionLatencyView := &view.View{
Name: obsreport.BuildProcessorCustomMetricName(typeStr, statOverallDecisionLatencyUs.Name()),
Name: obsreport.BuildProcessorCustomMetricName(metadata.Type, statOverallDecisionLatencyUs.Name()),
Measure: statOverallDecisionLatencyUs,
Description: statOverallDecisionLatencyUs.Description(),
Aggregation: latencyDistributionAggregation,
}

traceRemovalAgeView := &view.View{
Name: obsreport.BuildProcessorCustomMetricName(typeStr, statTraceRemovalAgeSec.Name()),
Name: obsreport.BuildProcessorCustomMetricName(metadata.Type, statTraceRemovalAgeSec.Name()),
Measure: statTraceRemovalAgeSec,
Description: statTraceRemovalAgeSec.Description(),
Aggregation: ageDistributionAggregation,
}
lateSpanArrivalView := &view.View{
Name: obsreport.BuildProcessorCustomMetricName(typeStr, statLateSpanArrivalAfterDecision.Name()),
Name: obsreport.BuildProcessorCustomMetricName(metadata.Type, statLateSpanArrivalAfterDecision.Name()),
Measure: statLateSpanArrivalAfterDecision,
Description: statLateSpanArrivalAfterDecision.Description(),
Aggregation: ageDistributionAggregation,
}

countPolicyEvaluationErrorView := &view.View{
Name: obsreport.BuildProcessorCustomMetricName(typeStr, statPolicyEvaluationErrorCount.Name()),
Name: obsreport.BuildProcessorCustomMetricName(metadata.Type, statPolicyEvaluationErrorCount.Name()),
Measure: statPolicyEvaluationErrorCount,
Description: statPolicyEvaluationErrorCount.Description(),
Aggregation: view.Sum(),
}

sampledTagKeys := []tag.Key{tagPolicyKey, tagSampledKey}
countTracesSampledView := &view.View{
Name: obsreport.BuildProcessorCustomMetricName(typeStr, statCountTracesSampled.Name()),
Name: obsreport.BuildProcessorCustomMetricName(metadata.Type, statCountTracesSampled.Name()),
Measure: statCountTracesSampled,
Description: statCountTracesSampled.Description(),
TagKeys: sampledTagKeys,
Aggregation: view.Sum(),
}

countTraceDroppedTooEarlyView := &view.View{
Name: obsreport.BuildProcessorCustomMetricName(typeStr, statDroppedTooEarlyCount.Name()),
Name: obsreport.BuildProcessorCustomMetricName(metadata.Type, statDroppedTooEarlyCount.Name()),
Measure: statDroppedTooEarlyCount,
Description: statDroppedTooEarlyCount.Description(),
Aggregation: view.Sum(),
}
countTraceIDArrivalView := &view.View{
Name: obsreport.BuildProcessorCustomMetricName(typeStr, statNewTraceIDReceivedCount.Name()),
Name: obsreport.BuildProcessorCustomMetricName(metadata.Type, statNewTraceIDReceivedCount.Name()),
Measure: statNewTraceIDReceivedCount,
Description: statNewTraceIDReceivedCount.Description(),
Aggregation: view.Sum(),
}
trackTracesOnMemorylView := &view.View{
Name: obsreport.BuildProcessorCustomMetricName(typeStr, statTracesOnMemoryGauge.Name()),
Name: obsreport.BuildProcessorCustomMetricName(metadata.Type, statTracesOnMemoryGauge.Name()),
Measure: statTracesOnMemoryGauge,
Description: statTracesOnMemoryGauge.Description(),
Aggregation: view.LastValue(),
