From 09c8bb72ebf57444472efe10fc275041eb294b5d Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Wed, 17 Aug 2022 16:17:24 +0000 Subject: [PATCH] reimplement OpenCensus bridge --- bridge/opencensus/aggregation.go | 157 ----- bridge/opencensus/aggregation_test.go | 322 --------- bridge/opencensus/exporter.go | 186 ----- bridge/opencensus/exporter_test.go | 475 ------------- bridge/opencensus/go.mod | 13 +- bridge/opencensus/go.sum | 4 - bridge/opencensus/opencensusmetric/config.go | 55 ++ .../opencensusmetric/config_test.go | 29 + bridge/opencensus/opencensusmetric/go.mod | 32 + bridge/opencensus/opencensusmetric/go.sum | 100 +++ .../opencensusmetric/internal/metric.go | 214 ++++++ .../opencensusmetric/internal/metric_test.go | 664 ++++++++++++++++++ .../opencensus/opencensusmetric/producer.go | 70 ++ .../opencensusmetric/producer_test.go | 111 +++ bridge/opencensus/opencensusmetric/version.go | 25 + bridge/opencensus/test/go.mod | 6 - bridge/opencensus/test/go.sum | 1 - 17 files changed, 1301 insertions(+), 1163 deletions(-) delete mode 100644 bridge/opencensus/aggregation.go delete mode 100644 bridge/opencensus/aggregation_test.go delete mode 100644 bridge/opencensus/exporter.go delete mode 100644 bridge/opencensus/exporter_test.go create mode 100644 bridge/opencensus/opencensusmetric/config.go create mode 100644 bridge/opencensus/opencensusmetric/config_test.go create mode 100644 bridge/opencensus/opencensusmetric/go.mod create mode 100644 bridge/opencensus/opencensusmetric/go.sum create mode 100644 bridge/opencensus/opencensusmetric/internal/metric.go create mode 100644 bridge/opencensus/opencensusmetric/internal/metric_test.go create mode 100644 bridge/opencensus/opencensusmetric/producer.go create mode 100644 bridge/opencensus/opencensusmetric/producer_test.go create mode 100644 bridge/opencensus/opencensusmetric/version.go diff --git a/bridge/opencensus/aggregation.go b/bridge/opencensus/aggregation.go deleted file mode 100644 index 99d2b07afad0..000000000000 --- a/bridge/opencensus/aggregation.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" - -import ( - "errors" - "fmt" - "time" - - "go.opencensus.io/metric/metricdata" - - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" -) - -var ( - errIncompatibleType = errors.New("incompatible type for aggregation") - errEmpty = errors.New("points may not be empty") - errBadPoint = errors.New("point cannot be converted") -) - -type recordFunc func(agg aggregation.Aggregation, end time.Time) error - -// recordAggregationsFromPoints records one OpenTelemetry aggregation for -// each OpenCensus point. Points may not be empty and must be either -// all (int|float)64 or all *metricdata.Distribution. 
-func recordAggregationsFromPoints(points []metricdata.Point, recorder recordFunc) error { - if len(points) == 0 { - return errEmpty - } - switch t := points[0].Value.(type) { - case int64: - return recordGaugePoints(points, recorder) - case float64: - return recordGaugePoints(points, recorder) - case *metricdata.Distribution: - return recordDistributionPoint(points, recorder) - default: - // TODO add *metricdata.Summary support - return fmt.Errorf("%w: %v", errIncompatibleType, t) - } -} - -var _ aggregation.Aggregation = &ocRawAggregator{} -var _ aggregation.LastValue = &ocRawAggregator{} - -// recordGaugePoints creates an OpenTelemetry aggregation from OpenCensus points. -// Points may not be empty, and must only contain integers or floats. -func recordGaugePoints(pts []metricdata.Point, recorder recordFunc) error { - for _, pt := range pts { - switch t := pt.Value.(type) { - case int64: - if err := recorder(&ocRawAggregator{ - value: number.NewInt64Number(pt.Value.(int64)), - time: pt.Time, - }, pt.Time); err != nil { - return err - } - case float64: - if err := recorder(&ocRawAggregator{ - value: number.NewFloat64Number(pt.Value.(float64)), - time: pt.Time, - }, pt.Time); err != nil { - return err - } - default: - return fmt.Errorf("%w: %v", errIncompatibleType, t) - } - } - return nil -} - -type ocRawAggregator struct { - value number.Number - time time.Time -} - -// Kind returns the kind of aggregation this is. -func (o *ocRawAggregator) Kind() aggregation.Kind { - return aggregation.LastValueKind -} - -// LastValue returns the last point. -func (o *ocRawAggregator) LastValue() (number.Number, time.Time, error) { - return o.value, o.time, nil -} - -var _ aggregation.Aggregation = &ocDistAggregator{} -var _ aggregation.Histogram = &ocDistAggregator{} - -// recordDistributionPoint creates an OpenTelemetry aggregation from -// OpenCensus points. Points may not be empty, and must only contain -// Distributions. The most recent disribution will be used in the aggregation. -func recordDistributionPoint(pts []metricdata.Point, recorder recordFunc) error { - // only use the most recent datapoint for now. - pt := pts[len(pts)-1] - val, ok := pt.Value.(*metricdata.Distribution) - if !ok { - return fmt.Errorf("%w: %v", errBadPoint, pt.Value) - } - bucketCounts := make([]uint64, len(val.Buckets)) - for i, bucket := range val.Buckets { - if bucket.Count < 0 { - return fmt.Errorf("%w: bucket count may not be negative", errBadPoint) - } - bucketCounts[i] = uint64(bucket.Count) - } - if val.Count < 0 { - return fmt.Errorf("%w: count may not be negative", errBadPoint) - } - return recorder(&ocDistAggregator{ - sum: number.NewFloat64Number(val.Sum), - count: uint64(val.Count), - buckets: aggregation.Buckets{ - Boundaries: val.BucketOptions.Bounds, - Counts: bucketCounts, - }, - }, pts[len(pts)-1].Time) -} - -type ocDistAggregator struct { - sum number.Number - count uint64 - buckets aggregation.Buckets -} - -// Kind returns the kind of aggregation this is. -func (o *ocDistAggregator) Kind() aggregation.Kind { - return aggregation.HistogramKind -} - -// Sum returns the sum of values. -func (o *ocDistAggregator) Sum() (number.Number, error) { - return o.sum, nil -} - -// Count returns the number of values. -func (o *ocDistAggregator) Count() (uint64, error) { - return o.count, nil -} - -// Histogram returns the count of events in pre-determined buckets. 
-func (o *ocDistAggregator) Histogram() (aggregation.Buckets, error) { - return o.buckets, nil -} diff --git a/bridge/opencensus/aggregation_test.go b/bridge/opencensus/aggregation_test.go deleted file mode 100644 index bbaff5c64408..000000000000 --- a/bridge/opencensus/aggregation_test.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package opencensus - -import ( - "errors" - "testing" - "time" - - "go.opencensus.io/metric/metricdata" - - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" -) - -func TestNewAggregationFromPoints(t *testing.T) { - now := time.Now() - for _, tc := range []struct { - desc string - input []metricdata.Point - expectedKind aggregation.Kind - expectedErr error - }{ - { - desc: "no points", - expectedErr: errEmpty, - }, - { - desc: "int point", - input: []metricdata.Point{ - { - Time: now, - Value: int64(23), - }, - }, - expectedKind: aggregation.LastValueKind, - }, - { - desc: "float point", - input: []metricdata.Point{ - { - Time: now, - Value: float64(23), - }, - }, - expectedKind: aggregation.LastValueKind, - }, - { - desc: "distribution point", - input: []metricdata.Point{ - { - Time: now, - Value: &metricdata.Distribution{ - Count: 2, - Sum: 55, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - {Count: 1}, - {Count: 1}, - }, - }, - }, - }, - expectedKind: aggregation.HistogramKind, - }, - { - desc: "bad distribution bucket count", - input: []metricdata.Point{ - { - Time: now, - Value: &metricdata.Distribution{ - Count: 2, - Sum: 55, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - // negative bucket - {Count: -1}, - {Count: 1}, - }, - }, - }, - }, - expectedErr: errBadPoint, - }, - { - desc: "bad distribution count", - input: []metricdata.Point{ - { - Time: now, - Value: &metricdata.Distribution{ - // negative count - Count: -2, - Sum: 55, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - {Count: 1}, - {Count: 1}, - }, - }, - }, - }, - expectedErr: errBadPoint, - }, - { - desc: "incompatible point type bool", - input: []metricdata.Point{ - { - Time: now, - Value: true, - }, - }, - expectedErr: errIncompatibleType, - }, - { - desc: "dist is incompatible with raw points", - input: []metricdata.Point{ - { - Time: now, - Value: int64(23), - }, - { - Time: now, - Value: &metricdata.Distribution{ - Count: 2, - Sum: 55, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - {Count: 1}, - {Count: 1}, - }, - }, - }, - }, - expectedErr: errIncompatibleType, - }, - { - desc: "int point is incompatible with dist", - input: []metricdata.Point{ - { - Time: now, - Value: &metricdata.Distribution{ - Count: 2, - Sum: 55, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - {Count: 1}, - 
{Count: 1}, - }, - }, - }, - { - Time: now, - Value: int64(23), - }, - }, - expectedErr: errBadPoint, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - var output []aggregation.Aggregation - err := recordAggregationsFromPoints(tc.input, func(agg aggregation.Aggregation, ts time.Time) error { - last := tc.input[len(tc.input)-1] - if ts != last.Time { - t.Errorf("incorrect timestamp %v != %v", ts, last.Time) - } - output = append(output, agg) - return nil - }) - if !errors.Is(err, tc.expectedErr) { - t.Errorf("newAggregationFromPoints(%v) = err(%v), want err(%v)", tc.input, err, tc.expectedErr) - } - for _, out := range output { - if tc.expectedErr == nil && out.Kind() != tc.expectedKind { - t.Errorf("newAggregationFromPoints(%v) = %v, want %v", tc.input, out.Kind(), tc.expectedKind) - } - } - }) - } -} - -func TestLastValueAggregation(t *testing.T) { - now := time.Now() - input := []metricdata.Point{ - {Value: int64(15), Time: now.Add(-time.Minute)}, - {Value: int64(-23), Time: now}, - } - idx := 0 - err := recordAggregationsFromPoints(input, func(agg aggregation.Aggregation, end time.Time) error { - if agg.Kind() != aggregation.LastValueKind { - t.Errorf("recordAggregationsFromPoints(%v) = %v, want %v", input, agg.Kind(), aggregation.LastValueKind) - } - if end != input[idx].Time { - t.Errorf("recordAggregationsFromPoints(%v).end() = %v, want %v", input, end, input[idx].Time) - } - pointsLV, ok := agg.(aggregation.LastValue) - if !ok { - t.Errorf("recordAggregationsFromPoints(%v) = %v does not implement the aggregation.LastValue interface", input, agg) - } - lv, ts, _ := pointsLV.LastValue() - if lv.AsInt64() != input[idx].Value { - t.Errorf("recordAggregationsFromPoints(%v) = %v, want %v", input, lv.AsInt64(), input[idx].Value) - } - if ts != input[idx].Time { - t.Errorf("recordAggregationsFromPoints(%v) = %v, want %v", input, ts, input[idx].Time) - } - idx++ - return nil - }) - if err != nil { - t.Errorf("recordAggregationsFromPoints(%v) = unexpected error %v", input, err) - } -} - -func TestHistogramAggregation(t *testing.T) { - now := time.Now() - input := []metricdata.Point{ - { - Value: &metricdata.Distribution{ - Count: 0, - Sum: 0, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - {Count: 0}, - {Count: 0}, - }, - }, - }, - { - Time: now, - Value: &metricdata.Distribution{ - Count: 2, - Sum: 55, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - {Count: 1}, - {Count: 1}, - }, - }, - }, - } - var output aggregation.Aggregation - var end time.Time - err := recordAggregationsFromPoints(input, func(argAgg aggregation.Aggregation, argEnd time.Time) error { - output = argAgg - end = argEnd - return nil - }) - if err != nil { - t.Fatalf("recordAggregationsFromPoints(%v) = err(%v), want ", input, err) - } - if output.Kind() != aggregation.HistogramKind { - t.Errorf("recordAggregationsFromPoints(%v) = %v, want %v", input, output.Kind(), aggregation.HistogramKind) - } - if !end.Equal(now) { - t.Errorf("recordAggregationsFromPoints(%v).end() = %v, want %v", input, end, now) - } - distAgg, ok := output.(aggregation.Histogram) - if !ok { - t.Errorf("recordAggregationsFromPoints(%v) = %v does not implement the aggregation.Points interface", input, output) - } - sum, err := distAgg.Sum() - if err != nil { - t.Fatalf("Unexpected err: %v", err) - } - if sum.AsFloat64() != float64(55) { - t.Errorf("recordAggregationsFromPoints(%v).Sum() = %v, want %v", input, sum.AsFloat64(), 
float64(55)) - } - count, err := distAgg.Count() - if err != nil { - t.Fatalf("Unexpected err: %v", err) - } - if count != 2 { - t.Errorf("recordAggregationsFromPoints(%v).Count() = %v, want %v", input, count, 2) - } - hist, err := distAgg.Histogram() - if err != nil { - t.Fatalf("Unexpected err: %v", err) - } - inputBucketBoundaries := []float64{20, 30} - if len(hist.Boundaries) != len(inputBucketBoundaries) { - t.Fatalf("recordAggregationsFromPoints(%v).Histogram() produced %d boundaries, want %d boundaries", input, len(hist.Boundaries), len(inputBucketBoundaries)) - } - for i, b := range hist.Boundaries { - if b != inputBucketBoundaries[i] { - t.Errorf("recordAggregationsFromPoints(%v).Histogram().Boundaries[%d] = %v, want %v", input, i, b, inputBucketBoundaries[i]) - } - } - inputBucketCounts := []uint64{1, 1} - if len(hist.Counts) != len(inputBucketCounts) { - t.Fatalf("recordAggregationsFromPoints(%v).Histogram() produced %d buckets, want %d buckets", input, len(hist.Counts), len(inputBucketCounts)) - } - for i, c := range hist.Counts { - if c != inputBucketCounts[i] { - t.Errorf("recordAggregationsFromPoints(%v).Histogram().Counts[%d] = %d, want %d", input, i, c, inputBucketCounts[i]) - } - } -} diff --git a/bridge/opencensus/exporter.go b/bridge/opencensus/exporter.go deleted file mode 100644 index 7e7e79600071..000000000000 --- a/bridge/opencensus/exporter.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/metric/metricexport" - ocresource "go.opencensus.io/resource" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/unit" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -var errConversion = errors.New("unable to convert from OpenCensus to OpenTelemetry") - -// NewMetricExporter returns an OpenCensus exporter that exports to an -// OpenTelemetry exporter. -func NewMetricExporter(base export.Exporter) metricexport.Exporter { - return &exporter{base: base} -} - -// exporter implements the OpenCensus metric Exporter interface using an -// OpenTelemetry base exporter. -type exporter struct { - base export.Exporter -} - -// ExportMetrics implements the OpenCensus metric Exporter interface. 
-func (e *exporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { - res := resource.Empty() - if len(metrics) != 0 { - res = convertResource(metrics[0].Resource) - } - return e.base.Export(ctx, res, &censusLibraryReader{metrics: metrics}) -} - -type censusLibraryReader struct { - metrics []*metricdata.Metric -} - -func (r censusLibraryReader) ForEach(readerFunc func(instrumentation.Library, export.Reader) error) error { - return readerFunc(instrumentation.Library{ - Name: "OpenCensus Bridge", - }, &metricReader{metrics: r.metrics}) -} - -type metricReader struct { - // RWMutex implements locking for the `Reader` interface. - sync.RWMutex - metrics []*metricdata.Metric -} - -var _ export.Reader = &metricReader{} - -// ForEach iterates through the metrics data, synthesizing an -// export.Record with the appropriate aggregation for the exporter. -func (d *metricReader) ForEach(_ aggregation.TemporalitySelector, f func(export.Record) error) error { - for _, m := range d.metrics { - descriptor, err := convertDescriptor(m.Descriptor) - if err != nil { - otel.Handle(err) - continue - } - for _, ts := range m.TimeSeries { - if len(ts.Points) == 0 { - continue - } - attrs, err := convertAttrs(m.Descriptor.LabelKeys, ts.LabelValues) - if err != nil { - otel.Handle(err) - continue - } - err = recordAggregationsFromPoints( - ts.Points, - func(agg aggregation.Aggregation, end time.Time) error { - return f(export.NewRecord( - &descriptor, - &attrs, - agg, - ts.StartTime, - end, - )) - }) - if err != nil && !errors.Is(err, aggregation.ErrNoData) { - return err - } - } - } - return nil -} - -// convertAttrs converts from OpenCensus attribute keys and values to an -// OpenTelemetry attribute Set. -func convertAttrs(keys []metricdata.LabelKey, values []metricdata.LabelValue) (attribute.Set, error) { - if len(keys) != len(values) { - return attribute.NewSet(), fmt.Errorf("%w different number of label keys (%d) and values (%d)", errConversion, len(keys), len(values)) - } - attrs := []attribute.KeyValue{} - for i, lv := range values { - if !lv.Present { - continue - } - attrs = append(attrs, attribute.KeyValue{ - Key: attribute.Key(keys[i].Key), - Value: attribute.StringValue(lv.Value), - }) - } - return attribute.NewSet(attrs...), nil -} - -// convertResource converts an OpenCensus Resource to an OpenTelemetry Resource -// Note: the ocresource.Resource Type field is not used. -func convertResource(res *ocresource.Resource) *resource.Resource { - attrs := []attribute.KeyValue{} - if res == nil { - return nil - } - for k, v := range res.Labels { - attrs = append(attrs, attribute.KeyValue{Key: attribute.Key(k), Value: attribute.StringValue(v)}) - } - return resource.NewSchemaless(attrs...) -} - -// convertDescriptor converts an OpenCensus Descriptor to an OpenTelemetry Descriptor. 
-func convertDescriptor(ocDescriptor metricdata.Descriptor) (sdkapi.Descriptor, error) { - var ( - nkind number.Kind - ikind sdkapi.InstrumentKind - ) - switch ocDescriptor.Type { - case metricdata.TypeGaugeInt64: - nkind = number.Int64Kind - ikind = sdkapi.GaugeObserverInstrumentKind - case metricdata.TypeGaugeFloat64: - nkind = number.Float64Kind - ikind = sdkapi.GaugeObserverInstrumentKind - case metricdata.TypeCumulativeInt64: - nkind = number.Int64Kind - ikind = sdkapi.CounterObserverInstrumentKind - case metricdata.TypeCumulativeFloat64: - nkind = number.Float64Kind - ikind = sdkapi.CounterObserverInstrumentKind - default: - // Includes TypeGaugeDistribution, TypeCumulativeDistribution, TypeSummary - return sdkapi.Descriptor{}, fmt.Errorf("%w; descriptor type: %v", errConversion, ocDescriptor.Type) - } - opts := []instrument.Option{ - instrument.WithDescription(ocDescriptor.Description), - } - switch ocDescriptor.Unit { - case metricdata.UnitDimensionless: - opts = append(opts, instrument.WithUnit(unit.Dimensionless)) - case metricdata.UnitBytes: - opts = append(opts, instrument.WithUnit(unit.Bytes)) - case metricdata.UnitMilliseconds: - opts = append(opts, instrument.WithUnit(unit.Milliseconds)) - } - cfg := instrument.NewConfig(opts...) - return sdkapi.NewDescriptor(ocDescriptor.Name, ikind, nkind, cfg.Description(), cfg.Unit()), nil -} diff --git a/bridge/opencensus/exporter_test.go b/bridge/opencensus/exporter_test.go deleted file mode 100644 index 2634f5334d5f..000000000000 --- a/bridge/opencensus/exporter_test.go +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package opencensus - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - "go.opencensus.io/metric/metricdata" - ocresource "go.opencensus.io/resource" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/unit" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/metric/controller/controllertest" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/metrictest" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -type fakeExporter struct { - export.Exporter - records []export.Record - resource *resource.Resource - err error -} - -func (f *fakeExporter) Export(ctx context.Context, res *resource.Resource, ilr export.InstrumentationLibraryReader) error { - return controllertest.ReadAll(ilr, aggregation.StatelessTemporalitySelector(), - func(_ instrumentation.Library, record export.Record) error { - f.resource = res - f.records = append(f.records, record) - return f.err - }) -} - -type fakeErrorHandler struct { - err error -} - -func (f *fakeErrorHandler) Handle(err error) { - f.err = err -} - -func (f *fakeErrorHandler) matches(err error) error { - // make sure err is cleared for the next test - defer func() { f.err = nil }() - if !errors.Is(f.err, err) { - return fmt.Errorf("err(%v), want err(%v)", f.err, err) - } - return nil -} - -func TestExportMetrics(t *testing.T) { - now := time.Now() - basicDesc := metrictest.NewDescriptor( - "", - sdkapi.GaugeObserverInstrumentKind, - number.Int64Kind, - ) - fakeErrorHandler := &fakeErrorHandler{} - otel.SetErrorHandler(fakeErrorHandler) - for _, tc := range []struct { - desc string - input []*metricdata.Metric - exportErr error - expected []export.Record - expectedResource *resource.Resource - expectedHandledError error - }{ - { - desc: "no metrics", - }, - { - desc: "metric without points is dropped", - input: []*metricdata.Metric{ - { - TimeSeries: []*metricdata.TimeSeries{ - {}, - }, - }, - }, - }, - { - desc: "descriptor conversion error", - input: []*metricdata.Metric{ - // TypeGaugeDistribution isn't supported - {Descriptor: metricdata.Descriptor{Type: metricdata.TypeGaugeDistribution}}, - }, - expectedHandledError: errConversion, - }, - { - desc: "attrs conversion error", - input: []*metricdata.Metric{ - { - // No descriptor with attribute keys. - TimeSeries: []*metricdata.TimeSeries{ - // 1 attribute value, which doens't exist in keys. 
- { - LabelValues: []metricdata.LabelValue{{Value: "foo", Present: true}}, - Points: []metricdata.Point{ - {}, - }, - }, - }, - }, - }, - expectedHandledError: errConversion, - }, - { - desc: "unsupported summary point type", - input: []*metricdata.Metric{ - { - TimeSeries: []*metricdata.TimeSeries{ - { - Points: []metricdata.Point{ - {Value: &metricdata.Summary{}}, - }, - }, - }, - }, - }, - exportErr: errIncompatibleType, - }, - { - desc: "success", - input: []*metricdata.Metric{ - { - Resource: &ocresource.Resource{ - Labels: map[string]string{ - "R1": "V1", - "R2": "V2", - }, - }, - TimeSeries: []*metricdata.TimeSeries{ - { - StartTime: now, - Points: []metricdata.Point{ - {Value: int64(123), Time: now}, - }, - }, - }, - }, - }, - expectedResource: resource.NewSchemaless( - attribute.String("R1", "V1"), - attribute.String("R2", "V2"), - ), - expected: []export.Record{ - export.NewRecord( - &basicDesc, - attribute.EmptySet(), - &ocRawAggregator{ - value: number.NewInt64Number(123), - time: now, - }, - now, - now, - ), - }, - }, - { - desc: "export error after success", - input: []*metricdata.Metric{ - { - TimeSeries: []*metricdata.TimeSeries{ - { - StartTime: now, - Points: []metricdata.Point{ - {Value: int64(123), Time: now}, - }, - }, - }, - }, - }, - expected: []export.Record{ - export.NewRecord( - &basicDesc, - attribute.EmptySet(), - &ocRawAggregator{ - value: number.NewInt64Number(123), - time: now, - }, - now, - now, - ), - }, - exportErr: errors.New("failed to export"), - }, - { - desc: "partial success sends correct metrics and drops incorrect metrics with handled err", - input: []*metricdata.Metric{ - { - TimeSeries: []*metricdata.TimeSeries{ - { - StartTime: now, - Points: []metricdata.Point{ - {Value: int64(123), Time: now}, - }, - }, - }, - }, - // TypeGaugeDistribution isn't supported - {Descriptor: metricdata.Descriptor{Type: metricdata.TypeGaugeDistribution}}, - }, - expected: []export.Record{ - export.NewRecord( - &basicDesc, - attribute.EmptySet(), - &ocRawAggregator{ - value: number.NewInt64Number(123), - time: now, - }, - now, - now, - ), - }, - expectedHandledError: errConversion, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - fakeExporter := &fakeExporter{err: tc.exportErr} - err := NewMetricExporter(fakeExporter).ExportMetrics(context.Background(), tc.input) - if !errors.Is(err, tc.exportErr) { - t.Errorf("NewMetricExporter(%+v) = err(%v), want err(%v)", tc.input, err, tc.exportErr) - } - // Check the global error handler, since we don't return errors - // which occur during conversion. 
- err = fakeErrorHandler.matches(tc.expectedHandledError) - if err != nil { - t.Fatalf("ExportMetrics(%+v) = %v", tc.input, err) - } - output := fakeExporter.records - if len(tc.expected) != len(output) { - t.Fatalf("ExportMetrics(%+v) = %d records, want %d records", tc.input, len(output), len(tc.expected)) - } - if fakeExporter.resource.String() != tc.expectedResource.String() { - t.Errorf("ExportMetrics(%+v)[i].Resource() = %+v, want %+v", tc.input, fakeExporter.resource.String(), tc.expectedResource.String()) - } - for i, expected := range tc.expected { - if output[i].StartTime() != expected.StartTime() { - t.Errorf("ExportMetrics(%+v)[i].StartTime() = %+v, want %+v", tc.input, output[i].StartTime(), expected.StartTime()) - } - if output[i].EndTime() != expected.EndTime() { - t.Errorf("ExportMetrics(%+v)[i].EndTime() = %+v, want %+v", tc.input, output[i].EndTime(), expected.EndTime()) - } - if output[i].Descriptor().Name() != expected.Descriptor().Name() { - t.Errorf("ExportMetrics(%+v)[i].Descriptor() = %+v, want %+v", tc.input, output[i].Descriptor().Name(), expected.Descriptor().Name()) - } - // Don't bother with a complete check of the descriptor. - // That is checked as part of descriptor conversion tests below. - if !output[i].Attributes().Equals(expected.Attributes()) { - t.Errorf("ExportMetrics(%+v)[i].Attributes() = %+v, want %+v", tc.input, output[i].Attributes(), expected.Attributes()) - } - if output[i].Aggregation().Kind() != expected.Aggregation().Kind() { - t.Errorf("ExportMetrics(%+v)[i].Aggregation() = %+v, want %+v", tc.input, output[i].Aggregation().Kind(), expected.Aggregation().Kind()) - } - // Don't bother checking the contents of the points aggregation. - // Those tests are done with the aggregations themselves - } - }) - } -} - -func TestConvertAttributes(t *testing.T) { - setWithMultipleKeys := attribute.NewSet( - attribute.KeyValue{Key: attribute.Key("first"), Value: attribute.StringValue("1")}, - attribute.KeyValue{Key: attribute.Key("second"), Value: attribute.StringValue("2")}, - ) - for _, tc := range []struct { - desc string - inputKeys []metricdata.LabelKey - inputValues []metricdata.LabelValue - expected *attribute.Set - expectedErr error - }{ - { - desc: "no attributes", - expected: attribute.EmptySet(), - }, - { - desc: "different numbers of keys and values", - inputKeys: []metricdata.LabelKey{{Key: "foo"}}, - expected: attribute.EmptySet(), - expectedErr: errConversion, - }, - { - desc: "multiple keys and values", - inputKeys: []metricdata.LabelKey{{Key: "first"}, {Key: "second"}}, - inputValues: []metricdata.LabelValue{ - {Value: "1", Present: true}, - {Value: "2", Present: true}, - }, - expected: &setWithMultipleKeys, - }, - { - desc: "multiple keys and values with some not present", - inputKeys: []metricdata.LabelKey{{Key: "first"}, {Key: "second"}, {Key: "third"}}, - inputValues: []metricdata.LabelValue{ - {Value: "1", Present: true}, - {Value: "2", Present: true}, - {Present: false}, - }, - expected: &setWithMultipleKeys, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - output, err := convertAttrs(tc.inputKeys, tc.inputValues) - if !errors.Is(err, tc.expectedErr) { - t.Errorf("convertAttrs(keys: %v, values: %v) = err(%v), want err(%v)", tc.inputKeys, tc.inputValues, err, tc.expectedErr) - } - if !output.Equals(tc.expected) { - t.Errorf("convertAttrs(keys: %v, values: %v) = %+v, want %+v", tc.inputKeys, tc.inputValues, output.ToSlice(), tc.expected.ToSlice()) - } - }) - } -} -func TestConvertResource(t *testing.T) { - for _, tc := range []struct 
{ - desc string - input *ocresource.Resource - expected *resource.Resource - }{ - { - desc: "nil resource", - }, - { - desc: "empty resource", - input: &ocresource.Resource{ - Labels: map[string]string{}, - }, - expected: resource.NewSchemaless(), - }, - { - desc: "resource with attributes", - input: &ocresource.Resource{ - Labels: map[string]string{ - "foo": "bar", - "tick": "tock", - }, - }, - expected: resource.NewSchemaless( - attribute.KeyValue{Key: attribute.Key("foo"), Value: attribute.StringValue("bar")}, - attribute.KeyValue{Key: attribute.Key("tick"), Value: attribute.StringValue("tock")}, - ), - }, - } { - t.Run(tc.desc, func(t *testing.T) { - output := convertResource(tc.input) - if !output.Equal(tc.expected) { - t.Errorf("convertResource(%v) = %+v, want %+v", tc.input, output, tc.expected) - } - }) - } -} -func TestConvertDescriptor(t *testing.T) { - for _, tc := range []struct { - desc string - input metricdata.Descriptor - expected sdkapi.Descriptor - expectedErr error - }{ - { - desc: "empty descriptor", - expected: metrictest.NewDescriptor( - "", - sdkapi.GaugeObserverInstrumentKind, - number.Int64Kind, - ), - }, - { - desc: "gauge int64 bytes", - input: metricdata.Descriptor{ - Name: "foo", - Description: "bar", - Unit: metricdata.UnitBytes, - Type: metricdata.TypeGaugeInt64, - }, - expected: metrictest.NewDescriptor( - "foo", - sdkapi.GaugeObserverInstrumentKind, - number.Int64Kind, - instrument.WithDescription("bar"), - instrument.WithUnit(unit.Bytes), - ), - }, - { - desc: "gauge float64 ms", - input: metricdata.Descriptor{ - Name: "foo", - Description: "bar", - Unit: metricdata.UnitMilliseconds, - Type: metricdata.TypeGaugeFloat64, - }, - expected: metrictest.NewDescriptor( - "foo", - sdkapi.GaugeObserverInstrumentKind, - number.Float64Kind, - instrument.WithDescription("bar"), - instrument.WithUnit(unit.Milliseconds), - ), - }, - { - desc: "cumulative int64 dimensionless", - input: metricdata.Descriptor{ - Name: "foo", - Description: "bar", - Unit: metricdata.UnitDimensionless, - Type: metricdata.TypeCumulativeInt64, - }, - expected: metrictest.NewDescriptor( - "foo", - sdkapi.CounterObserverInstrumentKind, - number.Int64Kind, - instrument.WithDescription("bar"), - instrument.WithUnit(unit.Dimensionless), - ), - }, - { - desc: "cumulative float64 dimensionless", - input: metricdata.Descriptor{ - Name: "foo", - Description: "bar", - Unit: metricdata.UnitDimensionless, - Type: metricdata.TypeCumulativeFloat64, - }, - expected: metrictest.NewDescriptor( - "foo", - sdkapi.CounterObserverInstrumentKind, - number.Float64Kind, - instrument.WithDescription("bar"), - instrument.WithUnit(unit.Dimensionless), - ), - }, - { - desc: "incompatible TypeCumulativeDistribution", - input: metricdata.Descriptor{ - Name: "foo", - Description: "bar", - Type: metricdata.TypeCumulativeDistribution, - }, - expectedErr: errConversion, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - output, err := convertDescriptor(tc.input) - if !errors.Is(err, tc.expectedErr) { - t.Errorf("convertDescriptor(%v) = err(%v), want err(%v)", tc.input, err, tc.expectedErr) - } - if output != tc.expected { - t.Errorf("convertDescriptor(%v) = %+v, want %+v", tc.input, output, tc.expected) - } - }) - } -} diff --git a/bridge/opencensus/go.mod b/bridge/opencensus/go.mod index 435653777e7a..954a55c574b7 100644 --- a/bridge/opencensus/go.mod +++ b/bridge/opencensus/go.mod @@ -1,30 +1,19 @@ module go.opentelemetry.io/otel/bridge/opencensus -go 1.17 +go 1.18 require ( go.opencensus.io 
v0.22.6-0.20201102222123-380f4078db9f go.opentelemetry.io/otel v1.9.0 - go.opentelemetry.io/otel/metric v0.31.0 - go.opentelemetry.io/otel/sdk v1.9.0 - go.opentelemetry.io/otel/sdk/metric v0.31.0 go.opentelemetry.io/otel/trace v1.9.0 ) require ( - github.com/benbjohnson/clock v1.3.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect - golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect ) replace go.opentelemetry.io/otel => ../.. -replace go.opentelemetry.io/otel/sdk => ../../sdk - -replace go.opentelemetry.io/otel/metric => ../../metric - -replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric - replace go.opentelemetry.io/otel/trace => ../../trace diff --git a/bridge/opencensus/go.sum b/bridge/opencensus/go.sum index e42b184d238e..2e90b382eb90 100644 --- a/bridge/opencensus/go.sum +++ b/bridge/opencensus/go.sum @@ -1,7 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -42,8 +40,6 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 h1:iGu644GcxtEcrInvDsQRCwJjtCIOlT2V7IRt6ah2Whw= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/bridge/opencensus/opencensusmetric/config.go b/bridge/opencensus/opencensusmetric/config.go new file mode 100644 index 000000000000..7d79f1dcb74d --- /dev/null +++ b/bridge/opencensus/opencensusmetric/config.go @@ -0,0 +1,55 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package opencensusmetric // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric" + +import ( + "go.opentelemetry.io/otel/sdk/resource" +) + +// Config contains configuration options for a Producer. +type Config struct { + res *resource.Resource +} + +// Option applies an option to a Config. +type Option interface { + apply(Config) Config +} + +type optionFunc func(Config) Config + +func (fn optionFunc) apply(cfg Config) Config { + return fn(cfg) +} + +// WithResource sets the resource for the bridge. +func WithResource(res *resource.Resource) Option { + return optionFunc(func(cfg Config) Config { + cfg.res = res + return cfg + }) +} + +// newConfig applies all the options to a returned Config. +func newConfig(options []Option) Config { + var cfg Config + for _, option := range options { + cfg = option.apply(cfg) + } + if cfg.res == nil { + cfg.res = resource.Default() + } + return cfg +} diff --git a/bridge/opencensus/opencensusmetric/config_test.go b/bridge/opencensus/opencensusmetric/config_test.go new file mode 100644 index 000000000000..1850199664ab --- /dev/null +++ b/bridge/opencensus/opencensusmetric/config_test.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package opencensusmetric // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric" + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/otel/sdk/resource" +) + +func TestWithResource(t *testing.T) { + res := resource.NewSchemaless() + c := newConfig([]Option{WithResource(res)}) + assert.Same(t, res, c.res) +} diff --git a/bridge/opencensus/opencensusmetric/go.mod b/bridge/opencensus/opencensusmetric/go.mod new file mode 100644 index 000000000000..a79a0d4f56f9 --- /dev/null +++ b/bridge/opencensus/opencensusmetric/go.mod @@ -0,0 +1,32 @@ +module go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric + +go 1.18 + +require ( + github.com/stretchr/testify v1.7.1 + go.opencensus.io v0.23.0 + go.opentelemetry.io/otel v1.9.0 + go.opentelemetry.io/otel/metric v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/otel/sdk v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/otel/sdk/metric v0.0.0-00010101000000-000000000000 +) + +require ( + github.com/davecgh/go-spew v1.1.0 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + go.opentelemetry.io/otel/trace v1.9.0 // indirect + golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect + gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect +) + +replace go.opentelemetry.io/otel => ../../.. 
+ +replace go.opentelemetry.io/otel/sdk => ../../../sdk + +replace go.opentelemetry.io/otel/metric => ../../../metric + +replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric + +replace go.opentelemetry.io/otel/trace => ../../../trace diff --git a/bridge/opencensus/opencensusmetric/go.sum b/bridge/opencensus/opencensusmetric/go.sum new file mode 100644 index 000000000000..8b7a99fc6fb6 --- /dev/null +++ b/bridge/opencensus/opencensusmetric/go.sum @@ -0,0 +1,100 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 h1:iGu644GcxtEcrInvDsQRCwJjtCIOlT2V7IRt6ah2Whw= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/bridge/opencensus/opencensusmetric/internal/metric.go 
b/bridge/opencensus/opencensusmetric/internal/metric.go new file mode 100644 index 000000000000..240a95537ddf --- /dev/null +++ b/bridge/opencensus/opencensusmetric/internal/metric.go @@ -0,0 +1,214 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric/internal" + +import ( + "errors" + "fmt" + + ocmetricdata "go.opencensus.io/metric/metricdata" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +var ( + errConversion = errors.New("converting from OpenCensus to OpenTelemetry") + errAggregationType = errors.New("unsupported OpenCensus aggregation type") + errMismatchedValueTypes = errors.New("wrong value type for data point") + errNumberDataPoint = errors.New("converting a number data point") + errHistogramDataPoint = errors.New("converting a histogram data point") + errNegativeDistributionCount = errors.New("distribution count is negative") + errNegativeBucketCount = errors.New("distribution bucket count is negative") + errMismatchedAttributeKeyValues = errors.New("mismatched number of attribute keys and values") +) + +// ConvertMetrics converts metric data from OpenCensus to OpenTelemetry. +func ConvertMetrics(ocmetrics []*ocmetricdata.Metric) ([]metricdata.Metrics, error) { + otelMetrics := make([]metricdata.Metrics, 0, len(ocmetrics)) + var errInfo []string + for _, ocm := range ocmetrics { + if ocm == nil { + continue + } + agg, err := convertAggregation(ocm) + if err != nil { + errInfo = append(errInfo, err.Error()) + continue + } + otelMetrics = append(otelMetrics, metricdata.Metrics{ + Name: ocm.Descriptor.Name, + Description: ocm.Descriptor.Description, + Unit: convertUnit(ocm.Descriptor.Unit), + Data: agg, + }) + } + var aggregatedError error + if len(errInfo) > 0 { + aggregatedError = fmt.Errorf("%w: %q", errConversion, errInfo) + } + return otelMetrics, aggregatedError +} + +// convertAggregation produces an aggregation based on the opencensus Metric. +func convertAggregation(metric *ocmetricdata.Metric) (metricdata.Aggregation, error) { + labelKeys := metric.Descriptor.LabelKeys + switch metric.Descriptor.Type { + case ocmetricdata.TypeGaugeInt64: + return convertGauge[int64](labelKeys, metric.TimeSeries) + case ocmetricdata.TypeGaugeFloat64: + return convertGauge[float64](labelKeys, metric.TimeSeries) + case ocmetricdata.TypeCumulativeInt64: + return convertSum[int64](labelKeys, metric.TimeSeries) + case ocmetricdata.TypeCumulativeFloat64: + return convertSum[float64](labelKeys, metric.TimeSeries) + case ocmetricdata.TypeCumulativeDistribution: + return convertHistogram(labelKeys, metric.TimeSeries) + // TODO: Support summaries, once it is in the OTel data types. + } + return nil, fmt.Errorf("%w: %q", errAggregationType, metric.Descriptor.Type) +} + +// convertGauge converts an OpenCensus gauge to an OpenTelemetry gauge aggregation. 
+func convertGauge[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Gauge[N], error) { + points, err := convertNumberDataPoints[N](labelKeys, ts) + return metricdata.Gauge[N]{DataPoints: points}, err +} + +// convertSum converts an OpenCensus cumulative to an OpenTelemetry sum aggregation. +func convertSum[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Sum[N], error) { + points, err := convertNumberDataPoints[N](labelKeys, ts) + // OpenCensus sums are always Cumulative + return metricdata.Sum[N]{DataPoints: points, Temporality: metricdata.CumulativeTemporality}, err +} + +// convertNumberDataPoints converts OpenCensus TimeSeries to OpenTelemetry DataPoints. +func convertNumberDataPoints[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) ([]metricdata.DataPoint[N], error) { + var points []metricdata.DataPoint[N] + var errInfo []string + for _, t := range ts { + attrs, err := convertAttrs(labelKeys, t.LabelValues) + if err != nil { + errInfo = append(errInfo, err.Error()) + continue + } + for _, p := range t.Points { + v, ok := p.Value.(N) + if !ok { + errInfo = append(errInfo, fmt.Sprintf("%v: %q", errMismatchedValueTypes, p.Value)) + continue + } + points = append(points, metricdata.DataPoint[N]{ + Attributes: attrs, + StartTime: t.StartTime, + Time: p.Time, + Value: v, + }) + } + } + var aggregatedError error + if len(errInfo) > 0 { + aggregatedError = fmt.Errorf("%w: %v", errNumberDataPoint, errInfo) + } + return points, aggregatedError +} + +// convertHistogram converts OpenCensus Distribution timeseries to an OpenTelemetry Histogram aggregation. +func convertHistogram(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Histogram, error) { + points := make([]metricdata.HistogramDataPoint, 0, len(ts)) + var errInfo []string + for _, t := range ts { + attrs, err := convertAttrs(labelKeys, t.LabelValues) + if err != nil { + errInfo = append(errInfo, err.Error()) + continue + } + for _, p := range t.Points { + dist, ok := p.Value.(*ocmetricdata.Distribution) + if !ok { + errInfo = append(errInfo, fmt.Sprintf("%v: %d", errMismatchedValueTypes, p.Value)) + continue + } + bucketCounts, err := convertBucketCounts(dist.Buckets) + if err != nil { + errInfo = append(errInfo, err.Error()) + continue + } + if dist.Count < 0 { + errInfo = append(errInfo, fmt.Sprintf("%v: %d", errNegativeDistributionCount, dist.Count)) + continue + } + // TODO: handle exemplars + points = append(points, metricdata.HistogramDataPoint{ + Attributes: attrs, + StartTime: t.StartTime, + Time: p.Time, + Count: uint64(dist.Count), + Sum: dist.Sum, + Bounds: dist.BucketOptions.Bounds, + BucketCounts: bucketCounts, + }) + } + } + var aggregatedError error + if len(errInfo) > 0 { + aggregatedError = fmt.Errorf("%w: %v", errHistogramDataPoint, errInfo) + } + return metricdata.Histogram{DataPoints: points, Temporality: metricdata.CumulativeTemporality}, aggregatedError +} + +func convertBucketCounts(buckets []ocmetricdata.Bucket) ([]uint64, error) { + bucketCounts := make([]uint64, len(buckets)) + for i, bucket := range buckets { + if bucket.Count < 0 { + return nil, fmt.Errorf("%w: %q", errNegativeBucketCount, bucket.Count) + } + bucketCounts[i] = uint64(bucket.Count) + } + return bucketCounts, nil +} + +// convertAttrs converts from OpenCensus attribute keys and values to an +// OpenTelemetry attribute Set. 
+func convertAttrs(keys []ocmetricdata.LabelKey, values []ocmetricdata.LabelValue) (attribute.Set, error) {
+ if len(keys) != len(values) {
+ return attribute.NewSet(), fmt.Errorf("%w: keys(%d) values(%d)", errMismatchedAttributeKeyValues, len(keys), len(values))
+ }
+ attrs := []attribute.KeyValue{}
+ for i, lv := range values {
+ if !lv.Present {
+ continue
+ }
+ attrs = append(attrs, attribute.KeyValue{
+ Key: attribute.Key(keys[i].Key),
+ Value: attribute.StringValue(lv.Value),
+ })
+ }
+ return attribute.NewSet(attrs...), nil
+}
+
+// convertUnit converts from the OpenCensus unit to the OpenTelemetry unit.
+func convertUnit(u ocmetricdata.Unit) unit.Unit {
+ switch u {
+ case ocmetricdata.UnitDimensionless:
+ return unit.Dimensionless
+ case ocmetricdata.UnitBytes:
+ return unit.Bytes
+ case ocmetricdata.UnitMilliseconds:
+ return unit.Milliseconds
+ }
+ return unit.Unit("")
+}
diff --git a/bridge/opencensus/opencensusmetric/internal/metric_test.go b/bridge/opencensus/opencensusmetric/internal/metric_test.go
new file mode 100644
index 000000000000..b93bc4130884
--- /dev/null
+++ b/bridge/opencensus/opencensusmetric/internal/metric_test.go
@@ -0,0 +1,664 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package internal // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric/internal" + +import ( + "errors" + "testing" + "time" + + ocmetricdata "go.opencensus.io/metric/metricdata" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" +) + +func TestConvertMetrics(t *testing.T) { + endTime1 := time.Now() + endTime2 := endTime1.Add(-time.Millisecond) + startTime := endTime2.Add(-time.Minute) + for _, tc := range []struct { + desc string + input []*ocmetricdata.Metric + expected []metricdata.Metrics + expectedErr error + }{ + { + desc: "empty", + expected: []metricdata.Metrics{}, + }, + { + desc: "normal Histogram, gauges, and sums", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/histogram-a", + Description: "a testing histogram", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeDistribution, + LabelKeys: []ocmetricdata.LabelKey{ + {Key: "a"}, + {Key: "b"}, + }, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + + LabelValues: []ocmetricdata.LabelValue{ + { + Value: "hello", + Present: true, + }, { + Value: "world", + Present: true, + }, + }, + Points: []ocmetricdata.Point{ + ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{ + Count: 8, + Sum: 100.0, + BucketOptions: &ocmetricdata.BucketOptions{ + Bounds: []float64{1.0, 2.0, 3.0}, + }, + Buckets: []ocmetricdata.Bucket{ + {Count: 1}, + {Count: 2}, + {Count: 5}, + }, + }), + ocmetricdata.NewDistributionPoint(endTime2, &ocmetricdata.Distribution{ + Count: 10, + Sum: 110.0, + BucketOptions: &ocmetricdata.BucketOptions{ + Bounds: []float64{1.0, 2.0, 3.0}, + }, + Buckets: []ocmetricdata.Bucket{ + {Count: 1}, + {Count: 4}, + {Count: 5}, + }, + }), + }, + StartTime: startTime, + }, + }, + }, { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/gauge-a", + Description: "an int testing gauge", + Unit: ocmetricdata.UnitBytes, + Type: ocmetricdata.TypeGaugeInt64, + LabelKeys: []ocmetricdata.LabelKey{ + {Key: "c"}, + {Key: "d"}, + }, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + LabelValues: []ocmetricdata.LabelValue{ + { + Value: "foo", + Present: true, + }, { + Value: "bar", + Present: true, + }, + }, + Points: []ocmetricdata.Point{ + ocmetricdata.NewInt64Point(endTime1, 123), + ocmetricdata.NewInt64Point(endTime2, 1236), + }, + }, + }, + }, { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/gauge-b", + Description: "a float testing gauge", + Unit: ocmetricdata.UnitBytes, + Type: ocmetricdata.TypeGaugeFloat64, + LabelKeys: []ocmetricdata.LabelKey{ + {Key: "cf"}, + {Key: "df"}, + }, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + LabelValues: []ocmetricdata.LabelValue{ + { + Value: "foof", + Present: true, + }, { + Value: "barf", + Present: true, + }, + }, + Points: []ocmetricdata.Point{ + ocmetricdata.NewFloat64Point(endTime1, 123.4), + ocmetricdata.NewFloat64Point(endTime2, 1236.7), + }, + }, + }, + }, { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/sum-a", + Description: "an int testing sum", + Unit: ocmetricdata.UnitMilliseconds, + Type: ocmetricdata.TypeCumulativeInt64, + LabelKeys: []ocmetricdata.LabelKey{ + {Key: "e"}, + {Key: "f"}, + }, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + LabelValues: []ocmetricdata.LabelValue{ + { + Value: "zig", + Present: true, + }, { + Value: "zag", + Present: true, + }, + }, + Points: []ocmetricdata.Point{ + 
ocmetricdata.NewInt64Point(endTime1, 13), + ocmetricdata.NewInt64Point(endTime2, 14), + }, + }, + }, + }, { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/sum-b", + Description: "a float testing sum", + Unit: ocmetricdata.UnitMilliseconds, + Type: ocmetricdata.TypeCumulativeFloat64, + LabelKeys: []ocmetricdata.LabelKey{ + {Key: "e"}, + {Key: "f"}, + }, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + LabelValues: []ocmetricdata.LabelValue{ + { + Value: "zig", + Present: true, + }, { + Value: "zag", + Present: true, + }, + }, + Points: []ocmetricdata.Point{ + ocmetricdata.NewFloat64Point(endTime1, 12.3), + ocmetricdata.NewFloat64Point(endTime2, 123.4), + }, + }, + }, + }, + }, + expected: []metricdata.Metrics{ + { + Name: "foo.com/histogram-a", + Description: "a testing histogram", + Unit: unit.Dimensionless, + Data: metricdata.Histogram{ + DataPoints: []metricdata.HistogramDataPoint{ + { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("a"), + Value: attribute.StringValue("hello"), + }, attribute.KeyValue{ + Key: attribute.Key("b"), + Value: attribute.StringValue("world"), + }), + StartTime: startTime, + Time: endTime1, + Count: 8, + Sum: 100.0, + Bounds: []float64{1.0, 2.0, 3.0}, + BucketCounts: []uint64{1, 2, 5}, + }, { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("a"), + Value: attribute.StringValue("hello"), + }, attribute.KeyValue{ + Key: attribute.Key("b"), + Value: attribute.StringValue("world"), + }), + StartTime: startTime, + Time: endTime2, + Count: 10, + Sum: 110.0, + Bounds: []float64{1.0, 2.0, 3.0}, + BucketCounts: []uint64{1, 4, 5}, + }, + }, + Temporality: metricdata.CumulativeTemporality, + }, + }, { + Name: "foo.com/gauge-a", + Description: "an int testing gauge", + Unit: unit.Bytes, + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("c"), + Value: attribute.StringValue("foo"), + }, attribute.KeyValue{ + Key: attribute.Key("d"), + Value: attribute.StringValue("bar"), + }), + Time: endTime1, + Value: 123, + }, { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("c"), + Value: attribute.StringValue("foo"), + }, attribute.KeyValue{ + Key: attribute.Key("d"), + Value: attribute.StringValue("bar"), + }), + Time: endTime2, + Value: 1236, + }, + }, + }, + }, { + Name: "foo.com/gauge-b", + Description: "a float testing gauge", + Unit: unit.Bytes, + Data: metricdata.Gauge[float64]{ + DataPoints: []metricdata.DataPoint[float64]{ + { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("cf"), + Value: attribute.StringValue("foof"), + }, attribute.KeyValue{ + Key: attribute.Key("df"), + Value: attribute.StringValue("barf"), + }), + Time: endTime1, + Value: 123.4, + }, { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("cf"), + Value: attribute.StringValue("foof"), + }, attribute.KeyValue{ + Key: attribute.Key("df"), + Value: attribute.StringValue("barf"), + }), + Time: endTime2, + Value: 1236.7, + }, + }, + }, + }, { + Name: "foo.com/sum-a", + Description: "an int testing sum", + Unit: unit.Milliseconds, + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("e"), + Value: attribute.StringValue("zig"), + }, attribute.KeyValue{ + Key: attribute.Key("f"), + Value: attribute.StringValue("zag"), + }), + Time: endTime1, 
+ Value: 13, + }, { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("e"), + Value: attribute.StringValue("zig"), + }, attribute.KeyValue{ + Key: attribute.Key("f"), + Value: attribute.StringValue("zag"), + }), + Time: endTime2, + Value: 14, + }, + }, + }, + }, { + Name: "foo.com/sum-b", + Description: "a float testing sum", + Unit: unit.Milliseconds, + Data: metricdata.Sum[float64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[float64]{ + { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("e"), + Value: attribute.StringValue("zig"), + }, attribute.KeyValue{ + Key: attribute.Key("f"), + Value: attribute.StringValue("zag"), + }), + Time: endTime1, + Value: 12.3, + }, { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("e"), + Value: attribute.StringValue("zig"), + }, attribute.KeyValue{ + Key: attribute.Key("f"), + Value: attribute.StringValue("zag"), + }), + Time: endTime2, + Value: 123.4, + }, + }, + }, + }, + }, + }, { + desc: "histogram without data points", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/histogram-a", + Description: "a testing histogram", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeDistribution, + }, + }, + }, + expected: []metricdata.Metrics{ + { + Name: "foo.com/histogram-a", + Description: "a testing histogram", + Unit: unit.Dimensionless, + Data: metricdata.Histogram{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint{}, + }, + }, + }, + }, { + desc: "sum without data points", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/sum-a", + Description: "a testing sum", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeFloat64, + }, + }, + }, + expected: []metricdata.Metrics{ + { + Name: "foo.com/sum-a", + Description: "a testing sum", + Unit: unit.Dimensionless, + Data: metricdata.Sum[float64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[float64]{}, + }, + }, + }, + }, { + desc: "gauge without data points", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/gauge-a", + Description: "a testing gauge", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeGaugeInt64, + }, + }, + }, + expected: []metricdata.Metrics{ + { + Name: "foo.com/gauge-a", + Description: "a testing gauge", + Unit: unit.Dimensionless, + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{}, + }, + }, + }, + }, { + desc: "histogram with negative count", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/histogram-a", + Description: "a testing histogram", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeDistribution, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + Points: []ocmetricdata.Point{ + ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{ + Count: -8, + }), + }, + StartTime: startTime, + }, + }, + }, + }, + expectedErr: errConversion, + }, { + desc: "histogram with negative bucket count", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/histogram-a", + Description: "a testing histogram", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeDistribution, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + Points: 
[]ocmetricdata.Point{ + ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{ + Buckets: []ocmetricdata.Bucket{ + {Count: -1}, + {Count: 2}, + {Count: 5}, + }, + }), + }, + StartTime: startTime, + }, + }, + }, + }, + expectedErr: errConversion, + }, { + desc: "histogram with non-histogram datapoint type", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/bad-point", + Description: "a bad type", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeDistribution, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + Points: []ocmetricdata.Point{ + ocmetricdata.NewFloat64Point(endTime1, 1.0), + }, + StartTime: startTime, + }, + }, + }, + }, + expectedErr: errConversion, + }, { + desc: "sum with non-sum datapoint type", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/bad-point", + Description: "a bad type", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeFloat64, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + Points: []ocmetricdata.Point{ + ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{}), + }, + StartTime: startTime, + }, + }, + }, + }, + expectedErr: errConversion, + }, { + desc: "gauge with non-gauge datapoint type", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/bad-point", + Description: "a bad type", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeGaugeFloat64, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + Points: []ocmetricdata.Point{ + ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{}), + }, + StartTime: startTime, + }, + }, + }, + }, + expectedErr: errConversion, + }, { + desc: "unsupported Gauge Distribution type", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/bad-point", + Description: "a bad type", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeGaugeDistribution, + }, + }, + }, + expectedErr: errConversion, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + output, err := ConvertMetrics(tc.input) + if !errors.Is(err, tc.expectedErr) { + t.Errorf("convertAggregation(%+v) = err(%v), want err(%v)", tc.input, err, tc.expectedErr) + } + metricdatatest.AssertEqual[metricdata.ScopeMetrics](t, + metricdata.ScopeMetrics{Metrics: tc.expected}, + metricdata.ScopeMetrics{Metrics: output}) + }) + } +} + +func TestConvertUnits(t *testing.T) { + var noUnit unit.Unit + for _, tc := range []struct { + desc string + input ocmetricdata.Unit + expected unit.Unit + }{{ + desc: "unspecified unit", + expected: noUnit, + }, { + desc: "dimensionless", + input: ocmetricdata.UnitDimensionless, + expected: unit.Dimensionless, + }, { + desc: "milliseconds", + input: ocmetricdata.UnitMilliseconds, + expected: unit.Milliseconds, + }, { + desc: "bytes", + input: ocmetricdata.UnitBytes, + expected: unit.Bytes, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + output := convertUnit(tc.input) + if output != tc.expected { + t.Errorf("convertUnit(%v) = %q, want %q", tc.input, output, tc.expected) + } + }) + } +} + +func TestConvertAttributes(t *testing.T) { + setWithMultipleKeys := attribute.NewSet( + attribute.KeyValue{Key: attribute.Key("first"), Value: attribute.StringValue("1")}, + attribute.KeyValue{Key: attribute.Key("second"), Value: attribute.StringValue("2")}, + ) + for _, tc := range []struct { + desc string + inputKeys []ocmetricdata.LabelKey + inputValues 
[]ocmetricdata.LabelValue + expected *attribute.Set + expectedErr error + }{ + { + desc: "no attributes", + expected: attribute.EmptySet(), + }, + { + desc: "different numbers of keys and values", + inputKeys: []ocmetricdata.LabelKey{{Key: "foo"}}, + expected: attribute.EmptySet(), + expectedErr: errMismatchedAttributeKeyValues, + }, + { + desc: "multiple keys and values", + inputKeys: []ocmetricdata.LabelKey{{Key: "first"}, {Key: "second"}}, + inputValues: []ocmetricdata.LabelValue{ + {Value: "1", Present: true}, + {Value: "2", Present: true}, + }, + expected: &setWithMultipleKeys, + }, + { + desc: "multiple keys and values with some not present", + inputKeys: []ocmetricdata.LabelKey{{Key: "first"}, {Key: "second"}, {Key: "third"}}, + inputValues: []ocmetricdata.LabelValue{ + {Value: "1", Present: true}, + {Value: "2", Present: true}, + {Present: false}, + }, + expected: &setWithMultipleKeys, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + output, err := convertAttrs(tc.inputKeys, tc.inputValues) + if !errors.Is(err, tc.expectedErr) { + t.Errorf("convertAttrs(keys: %v, values: %v) = err(%v), want err(%v)", tc.inputKeys, tc.inputValues, err, tc.expectedErr) + } + if !output.Equals(tc.expected) { + t.Errorf("convertAttrs(keys: %v, values: %v) = %+v, want %+v", tc.inputKeys, tc.inputValues, output.ToSlice(), tc.expected.ToSlice()) + } + }) + } +} diff --git a/bridge/opencensus/opencensusmetric/producer.go b/bridge/opencensus/opencensusmetric/producer.go new file mode 100644 index 000000000000..8a5a84b98113 --- /dev/null +++ b/bridge/opencensus/opencensusmetric/producer.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package opencensusmetric // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric" + +import ( + "context" + + ocmetricdata "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + + "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric/internal" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" +) + +const ( + // instrumentationName is the name of this instrumentation package. + instrumentationName = "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric" +) + +// producer is a producer which provides metrics collected using OpenCensus +// instrumentation. +type producer struct { + res *resource.Resource + scope instrumentation.Scope + manager *metricproducer.Manager +} + +// NewProducer returns a producer which can be invoked to collect metrics. +func NewProducer(opts ...Option) metric.Producer { + cfg := newConfig(opts) + return &producer{ + res: cfg.res, + scope: instrumentation.Scope{Name: instrumentationName, Version: SemVersion()}, + manager: metricproducer.GlobalManager(), + } +} + +// Produce gathers all metrics from the OpenCensus in-memory state. 
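+// It reads every producer registered with the OpenCensus global
+// metricproducer.Manager, converts the result, and returns it under a single
+// ScopeMetrics. Conversion errors are returned together with whatever metrics
+// converted cleanly, so callers may receive partial data and a non-nil error
+// from the same call. A minimal sketch of the expected call pattern:
+//
+// rm, err := NewProducer(WithResource(res)).Produce(ctx)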
+func (p *producer) Produce(context.Context) (metricdata.ResourceMetrics, error) { + producers := p.manager.GetAll() + data := []*ocmetricdata.Metric{} + for _, ocProducer := range producers { + data = append(data, ocProducer.Read()...) + } + otelMetrics, err := internal.ConvertMetrics(data) + return metricdata.ResourceMetrics{ + Resource: p.res, + ScopeMetrics: []metricdata.ScopeMetrics{ + { + Scope: p.scope, + Metrics: otelMetrics, + }, + }, + }, err +} diff --git a/bridge/opencensus/opencensusmetric/producer_test.go b/bridge/opencensus/opencensusmetric/producer_test.go new file mode 100644 index 000000000000..c8890e6f9f4d --- /dev/null +++ b/bridge/opencensus/opencensusmetric/producer_test.go @@ -0,0 +1,111 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package opencensusmetric // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric" + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + ocmetricdata "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" + "go.opentelemetry.io/otel/sdk/resource" +) + +func TestProducePartialError(t *testing.T) { + badProducer := &fakeOCProducer{ + metrics: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/bad-point", + Description: "a bad type", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeGaugeDistribution, + }, + }, + }, + } + metricproducer.GlobalManager().AddProducer(badProducer) + defer metricproducer.GlobalManager().DeleteProducer(badProducer) + + end := time.Now() + goodProducer := &fakeOCProducer{ + metrics: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/gauge-a", + Description: "an int testing gauge", + Unit: ocmetricdata.UnitBytes, + Type: ocmetricdata.TypeGaugeInt64, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + Points: []ocmetricdata.Point{ + ocmetricdata.NewInt64Point(end, 123), + }, + }, + }, + }, + }, + } + metricproducer.GlobalManager().AddProducer(goodProducer) + defer metricproducer.GlobalManager().DeleteProducer(goodProducer) + + res := resource.NewSchemaless(attribute.String("k1", "v11"), attribute.String("k1", "v12")) + + otelProducer := NewProducer(WithResource(res)) + out, err := otelProducer.Produce(context.Background()) + assert.NotNil(t, err) + expected := metricdata.ResourceMetrics{ + Resource: res, + ScopeMetrics: []metricdata.ScopeMetrics{ + { + Scope: instrumentation.Scope{Name: instrumentationName, Version: SemVersion()}, + Metrics: []metricdata.Metrics{ + { + Name: "foo.com/gauge-a", + Description: "an int testing gauge", + Unit: unit.Bytes, + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(), + 
Time: end, + Value: 123, + }, + }, + }, + }, + }, + }, + }, + } + metricdatatest.AssertEqual[metricdata.ResourceMetrics](t, out, expected) +} + +type fakeOCProducer struct { + metrics []*ocmetricdata.Metric +} + +func (f *fakeOCProducer) Read() []*ocmetricdata.Metric { + return f.metrics +} diff --git a/bridge/opencensus/opencensusmetric/version.go b/bridge/opencensus/opencensusmetric/version.go new file mode 100644 index 000000000000..0668fe65fbbc --- /dev/null +++ b/bridge/opencensus/opencensusmetric/version.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package opencensusmetric // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric" + +// Version is the current release version of the opencensus bridge. +func Version() string { + return "0.31.0" +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +func SemVersion() string { + return "semver:" + Version() +} diff --git a/bridge/opencensus/test/go.mod b/bridge/opencensus/test/go.mod index 699392faba1e..9a40bac93c6c 100644 --- a/bridge/opencensus/test/go.mod +++ b/bridge/opencensus/test/go.mod @@ -14,8 +14,6 @@ require ( github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect - go.opentelemetry.io/otel/sdk/metric v0.31.0 // indirect golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect ) @@ -23,10 +21,6 @@ replace go.opentelemetry.io/otel => ../../.. replace go.opentelemetry.io/otel/bridge/opencensus => ../ -replace go.opentelemetry.io/otel/metric => ../../../metric - replace go.opentelemetry.io/otel/sdk => ../../../sdk -replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric - replace go.opentelemetry.io/otel/trace => ../../../trace diff --git a/bridge/opencensus/test/go.sum b/bridge/opencensus/test/go.sum index cc5af06ea900..c7ddc1e13074 100644 --- a/bridge/opencensus/test/go.sum +++ b/bridge/opencensus/test/go.sum @@ -1,6 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
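Below is a minimal usage sketch for the producer introduced above. It is not part of the patch; it only exercises the surface this change adds (NewProducer, the WithResource option used in the tests, and Produce), with illustrative resource attributes, and leaves out the exporter/reader hand-off since that wiring is outside this change.

    package main

    import (
    	"context"
    	"log"

    	"go.opentelemetry.io/otel/attribute"
    	"go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric"
    	"go.opentelemetry.io/otel/sdk/resource"
    )

    func main() {
    	// OpenCensus-instrumented libraries register their metrics with the
    	// global metricproducer.Manager; the bridge producer reads them all.
    	res := resource.NewSchemaless(attribute.String("service.name", "oc-bridge-demo"))
    	producer := opencensusmetric.NewProducer(opencensusmetric.WithResource(res))

    	rm, err := producer.Produce(context.Background())
    	if err != nil {
    		// Conversion failures are partial: rm still carries any metrics
    		// that converted cleanly.
    		log.Printf("opencensus bridge: %v", err)
    	}
    	for _, sm := range rm.ScopeMetrics {
    		log.Printf("scope %s: %d metrics", sm.Scope.Name, len(sm.Metrics))
    	}
    }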