Replace InitEmptyWithCapacity with EnsureCapacity and Clear #2845

Merged: 2 commits, Apr 15, 2021
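
This change removes `InitEmptyWithCapacity` from `AttributeMap` and `StringMap` and replaces it with two separate operations: `Clear()`, which erases any existing entries, and `EnsureCapacity(capacity)`, which pre-allocates the backing slice. A minimal caller-side sketch of the migration is below; the `fillAttributes` helper and the `example` package are illustrative only, and the import path assumes the `consumer/pdata` layout the collector used at the time of this PR.

```go
package example

import "go.opentelemetry.io/collector/consumer/pdata"

// fillAttributes shows the caller-side migration: instead of
// attrs.InitEmptyWithCapacity(n), callers reset the map (only needed when it
// may already hold entries) and pre-size it before inserting.
func fillAttributes(attrs pdata.AttributeMap, values map[string]string) {
	attrs.Clear()                     // drop anything already in the map
	attrs.EnsureCapacity(len(values)) // pre-allocate before the inserts below
	for k, v := range values {
		attrs.UpsertString(k, v)
	}
}
```

Call sites that start from a freshly created map only need `EnsureCapacity`, which is what the testbed and host metrics scraper changes below do; the OpenCensus translator changes add `Clear()` as well.
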
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -15,6 +15,7 @@
- Remove testutil.WaitForPort, users can use testify.Eventually (#2926)
- Rename `processorhelper.NewTraceProcessor` to `processorhelper.NewTracesProcessor` (#2935)
- Rename `exporterhelper.NewTraceExporter` to `exporterhelper.NewTracesExporter` (#2937)
- Remove InitEmptyWithCapacity, add EnsureCapacity and Clear (#2845)

## 💡 Enhancements 💡

34 changes: 24 additions & 10 deletions consumer/pdata/common.go
@@ -392,13 +392,20 @@ func (am AttributeMap) InitFromMap(attrMap map[string]AttributeValue) AttributeM
return am
}

// InitEmptyWithCapacity constructs an empty AttributeMap with predefined slice capacity.
func (am AttributeMap) InitEmptyWithCapacity(cap int) {
if cap == 0 {
*am.orig = []otlpcommon.KeyValue(nil)
// Clear erases any existing entries in this AttributeMap instance.
func (am AttributeMap) Clear() {
*am.orig = nil
}

// EnsureCapacity increases the capacity of this AttributeMap instance, if necessary,
// to ensure that it can hold at least the number of elements specified by the capacity argument.
func (am AttributeMap) EnsureCapacity(capacity int) {
if capacity <= cap(*am.orig) {
return
}
*am.orig = make([]otlpcommon.KeyValue, 0, cap)
oldOrig := *am.orig
*am.orig = make([]otlpcommon.KeyValue, 0, capacity)
copy(*am.orig, oldOrig)

Comment on lines +407 to +408

Member:
Append may be faster to increase the capacity, assuming the memory manager has a way to extend the allocated block without reallocation (but I did not verify): https://github.com/golang/go/wiki/SliceTricks#extend
It may be worth benchmarking.

Member Author:
We can play with the internal implementation.

}

// Get returns the AttributeValue associated with the key and true. Returned
@@ -685,13 +692,20 @@ func (sm StringMap) InitFromMap(attrMap map[string]string) StringMap {
return sm
}

// InitEmptyWithCapacity constructs an empty StringMap with predefined slice capacity.
func (sm StringMap) InitEmptyWithCapacity(cap int) {
if cap == 0 {
*sm.orig = []otlpcommon.StringKeyValue(nil)
// Clear erases any existing entries in this StringMap instance.
func (sm StringMap) Clear() {
*sm.orig = nil
}

// EnsureCapacity increases the capacity of this StringMap instance, if necessary,
// to ensure that it can hold at least the number of elements specified by the capacity argument.
func (sm StringMap) EnsureCapacity(capacity int) {
if capacity <= cap(*sm.orig) {
return
}
*sm.orig = make([]otlpcommon.StringKeyValue, 0, cap)
oldOrig := *sm.orig
*sm.orig = make([]otlpcommon.StringKeyValue, 0, capacity)
copy(*sm.orig, oldOrig)
}

// Get returns the StringValue associated with the key and true,
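
The inline review comment on the new `EnsureCapacity` body above suggests that an append-based grow (the SliceTricks "Extend" idiom) might be faster than make-plus-copy, and that it is worth benchmarking. Below is a self-contained benchmark sketch of the two growth strategies on a stand-in key/value type; the `slicegrow` package, the `keyValue` type, and the sizes are made up for illustration, and this is not the pdata implementation itself. It would live in a `_test.go` file and run with `go test -bench .`.

```go
package slicegrow

import "testing"

// keyValue is a stand-in for otlpcommon.KeyValue so the benchmark is self-contained.
type keyValue struct{ key, value string }

// growCopy allocates a new backing array with the requested capacity and copies
// the existing elements over (the make-plus-copy strategy).
func growCopy(s []keyValue, capacity int) []keyValue {
	if capacity <= cap(s) {
		return s
	}
	out := make([]keyValue, len(s), capacity)
	copy(out, s)
	return out
}

// growAppend grows the slice with the append-based "Extend" idiom from the Go
// SliceTricks wiki, which lets the runtime try to enlarge the block in place.
func growAppend(s []keyValue, capacity int) []keyValue {
	if capacity <= cap(s) {
		return s
	}
	n := len(s)
	return append(s, make([]keyValue, capacity-n)...)[:n]
}

func BenchmarkGrowCopy(b *testing.B) {
	for i := 0; i < b.N; i++ {
		s := make([]keyValue, 8)
		_ = growCopy(s, 512)
	}
}

func BenchmarkGrowAppend(b *testing.B) {
	for i := 0; i < b.N; i++ {
		s := make([]keyValue, 8)
		_ = growAppend(s, 512)
	}
}
```
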
60 changes: 54 additions & 6 deletions consumer/pdata/common_test.go
@@ -669,11 +669,35 @@ func TestAttributeMap_Update(t *testing.T) {
assert.EqualValues(t, 123, av2.IntVal())
}

func TestAttributeMap_InitEmptyWithCapacity(t *testing.T) {
func TestAttributeMap_EnsureCapacity_Zero(t *testing.T) {
am := NewAttributeMap()
am.InitEmptyWithCapacity(0)
assert.Equal(t, NewAttributeMap(), am)
am.EnsureCapacity(0)
assert.Equal(t, 0, am.Len())
assert.Equal(t, 0, cap(*am.orig))
}

func TestAttributeMap_EnsureCapacity(t *testing.T) {
am := NewAttributeMap()
am.EnsureCapacity(5)
assert.Equal(t, 0, am.Len())
assert.Equal(t, 5, cap(*am.orig))
am.EnsureCapacity(3)
assert.Equal(t, 0, am.Len())
assert.Equal(t, 5, cap(*am.orig))
am.EnsureCapacity(8)
assert.Equal(t, 0, am.Len())
assert.Equal(t, 8, cap(*am.orig))
}

func TestAttributeMap_Clear(t *testing.T) {
am := NewAttributeMap()
assert.Nil(t, *am.orig)
am.Clear()
assert.Nil(t, *am.orig)
am.EnsureCapacity(5)
assert.NotNil(t, *am.orig)
am.Clear()
assert.Nil(t, *am.orig)
}

func TestNilStringMap(t *testing.T) {
@@ -846,11 +870,35 @@ func TestStringMap_CopyTo(t *testing.T) {
assert.EqualValues(t, generateTestStringMap(), dest)
}

func TestStringMap_InitEmptyWithCapacity(t *testing.T) {
func TestStringMap_EnsureCapacity_Zero(t *testing.T) {
sm := NewStringMap()
sm.InitEmptyWithCapacity(0)
assert.Equal(t, NewStringMap(), sm)
sm.EnsureCapacity(0)
assert.Equal(t, 0, sm.Len())
assert.Equal(t, 0, cap(*sm.orig))
}

func TestStringMap_EnsureCapacity(t *testing.T) {
sm := NewStringMap()
sm.EnsureCapacity(5)
assert.Equal(t, 0, sm.Len())
assert.Equal(t, 5, cap(*sm.orig))
sm.EnsureCapacity(3)
assert.Equal(t, 0, sm.Len())
assert.Equal(t, 5, cap(*sm.orig))
sm.EnsureCapacity(8)
assert.Equal(t, 0, sm.Len())
assert.Equal(t, 8, cap(*sm.orig))
}

func TestStringMap_Clear(t *testing.T) {
sm := NewStringMap()
assert.Nil(t, *sm.orig)
sm.Clear()
assert.Nil(t, *sm.orig)
sm.EnsureCapacity(5)
assert.NotNil(t, *sm.orig)
sm.Clear()
assert.Nil(t, *sm.orig)
}

func TestStringMap_InitFromMap(t *testing.T) {
2 changes: 0 additions & 2 deletions consumer/simple/metrics_test.go
@@ -504,7 +504,6 @@ func BenchmarkPdataMetrics(b *testing.B) {
{
dp := dps.At(0)
labels := dp.LabelsMap()
labels.InitEmptyWithCapacity(3)
labels.Insert("env", "prod")
labels.Insert("app", "myapp")
labels.Insert("version", "1.0")
@@ -514,7 +513,6 @@
{
dp := dps.At(1)
labels := dp.LabelsMap()
labels.InitEmptyWithCapacity(3)
labels.Insert("env", "prod")
labels.Insert("app", "myapp")
labels.Insert("version", "1.0")
7 changes: 4 additions & 3 deletions exporter/opencensusexporter/opencensus_test.go
@@ -76,14 +76,15 @@ func TestSendTraces(t *testing.T) {

sink.Reset()
// Sending data no Node.
td.ResourceSpans().At(0).Resource().Attributes().InitEmptyWithCapacity(0)
td.ResourceSpans().At(0).Resource().Attributes().Clear()
td = td.Clone()
assert.NoError(t, exp.ConsumeTraces(context.Background(), td))
assert.Eventually(t, func() bool {
return len(sink.AllTraces()) == 1
}, 10*time.Second, 5*time.Millisecond)
traces = sink.AllTraces()
require.Len(t, traces, 1)
assert.Equal(t, td, traces[0])
assert.EqualValues(t, td, traces[0])
}

func TestSendTraces_NoBackend(t *testing.T) {
@@ -173,7 +174,7 @@ func TestSendMetrics(t *testing.T) {

// Sending data no node.
sink.Reset()
md.ResourceMetrics().At(0).Resource().Attributes().InitEmptyWithCapacity(0)
md.ResourceMetrics().At(0).Resource().Attributes().Clear()
assert.NoError(t, exp.ConsumeMetrics(context.Background(), md))
assert.Eventually(t, func() bool {
return len(sink.AllMetrics()) == 1
14 changes: 3 additions & 11 deletions processor/resourceprocessor/resource_processor.go
@@ -29,9 +29,7 @@ type resourceProcessor struct {
func (rp *resourceProcessor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) {
rss := td.ResourceSpans()
for i := 0; i < rss.Len(); i++ {
resource := rss.At(i).Resource()
attrs := resource.Attributes()
rp.attrProc.Process(attrs)
rp.attrProc.Process(rss.At(i).Resource().Attributes())
}
return td, nil
}
@@ -40,11 +38,7 @@ func (rp *resourceProcessor) ProcessTraces(_ context.Context, td pdata.Traces) (
func (rp *resourceProcessor) ProcessMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) {
rms := md.ResourceMetrics()
for i := 0; i < rms.Len(); i++ {
resource := rms.At(i).Resource()
if resource.Attributes().Len() == 0 {
resource.Attributes().InitEmptyWithCapacity(1)
}
rp.attrProc.Process(resource.Attributes())
rp.attrProc.Process(rms.At(i).Resource().Attributes())
}
return md, nil
}
@@ -53,9 +47,7 @@ func (rp *resourceProcessor) ProcessMetrics(_ context.Context, md pdata.Metrics)
func (rp *resourceProcessor) ProcessLogs(_ context.Context, ld pdata.Logs) (pdata.Logs, error) {
rls := ld.ResourceLogs()
for i := 0; i < rls.Len(); i++ {
resource := rls.At(i).Resource()
attrs := resource.Attributes()
rp.attrProc.Process(attrs)
rp.attrProc.Process(rls.At(i).Resource().Attributes())
}
return ld, nil
}
@@ -49,45 +49,25 @@ type commandMetadata struct {

func (m *processMetadata) initializeResource(resource pdata.Resource) {
attr := resource.Attributes()
attr.InitEmptyWithCapacity(6)
m.insertPid(attr)
m.insertExecutable(attr)
m.insertCommand(attr)
m.insertUsername(attr)
}

func (m *processMetadata) insertPid(attr pdata.AttributeMap) {
attr.EnsureCapacity(6)
attr.InsertInt(conventions.AttributeProcessID, int64(m.pid))
}

func (m *processMetadata) insertExecutable(attr pdata.AttributeMap) {
attr.InsertString(conventions.AttributeProcessExecutableName, m.executable.name)
attr.InsertString(conventions.AttributeProcessExecutablePath, m.executable.path)
}

func (m *processMetadata) insertCommand(attr pdata.AttributeMap) {
if m.command == nil {
return
if m.command != nil {
attr.InsertString(conventions.AttributeProcessCommand, m.command.command)
if m.command.commandLineSlice != nil {
// TODO insert slice here once this is supported by the data model
// (see https://github.com/open-telemetry/opentelemetry-collector/pull/1142)
attr.InsertString(conventions.AttributeProcessCommandLine, strings.Join(m.command.commandLineSlice, " "))
} else {
attr.InsertString(conventions.AttributeProcessCommandLine, m.command.commandLine)
}
}

attr.InsertString(conventions.AttributeProcessCommand, m.command.command)
if m.command.commandLineSlice != nil {
// TODO insert slice here once this is supported by the data model
// (see https://github.com/open-telemetry/opentelemetry-collector/pull/1142)
attr.InsertString(conventions.AttributeProcessCommandLine, strings.Join(m.command.commandLineSlice, " "))
} else {
attr.InsertString(conventions.AttributeProcessCommandLine, m.command.commandLine)
if m.username != "" {
attr.InsertString(conventions.AttributeProcessOwner, m.username)
}
}

func (m *processMetadata) insertUsername(attr pdata.AttributeMap) {
if m.username == "" {
return
}

attr.InsertString(conventions.AttributeProcessOwner, m.username)
}

// processHandles provides a wrapper around []*process.Process
// to support testing

4 changes: 2 additions & 2 deletions testbed/testbed/data_providers.go
@@ -128,7 +128,7 @@ func (dp *PerfTestDataProvider) GenerateMetrics() (pdata.Metrics, bool) {
md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().Resize(1)
if dp.options.Attributes != nil {
attrs := md.ResourceMetrics().At(0).Resource().Attributes()
attrs.InitEmptyWithCapacity(len(dp.options.Attributes))
attrs.EnsureCapacity(len(dp.options.Attributes))
for k, v := range dp.options.Attributes {
attrs.UpsertString(k, v)
}
@@ -168,7 +168,7 @@ func (dp *PerfTestDataProvider) GenerateLogs() (pdata.Logs, bool) {
logs.ResourceLogs().At(0).InstrumentationLibraryLogs().Resize(1)
if dp.options.Attributes != nil {
attrs := logs.ResourceLogs().At(0).Resource().Attributes()
attrs.InitEmptyWithCapacity(len(dp.options.Attributes))
attrs.EnsureCapacity(len(dp.options.Attributes))
for k, v := range dp.options.Attributes {
attrs.UpsertString(k, v)
}
22 changes: 12 additions & 10 deletions translator/internaldata/oc_to_metrics.go
@@ -238,7 +238,7 @@ func setDataPoints(ocMetric *ocmetrics.Metric, metric pdata.Metric) {
}
}

func setLabelsMap(ocLabelsKeys []*ocmetrics.LabelKey, ocLabelValues []*ocmetrics.LabelValue, labelsMap pdata.StringMap) {
func fillLabelsMap(ocLabelsKeys []*ocmetrics.LabelKey, ocLabelValues []*ocmetrics.LabelValue, labelsMap pdata.StringMap) {
if len(ocLabelsKeys) == 0 || len(ocLabelValues) == 0 {
return
}
@@ -250,7 +250,8 @@ func setLabelsMap(ocLabelsKeys []*ocmetrics.LabelKey, ocLabelValues []*ocmetrics
lablesCount = len(ocLabelValues)
}

labelsMap.InitEmptyWithCapacity(lablesCount)
labelsMap.Clear()
labelsMap.EnsureCapacity(lablesCount)

Member:
Change in behavior if labelsMap was not empty before. Is this guaranteed? If it is, I suggest changing setLabelsMap to return the labelsMap (pdata.StringMap) instead of accepting it as a parameter.

Member Author:
Returning is not a good pattern for the way we want users to use our pdata. Returning means I would need a SetLabelsMap, which we don't have, because we want users to construct things top-down to avoid extra memory allocation when copying into the parent struct.

In this case, as in all the other cases, we start with an empty object during conversion, so there is no problem with previous values.

Member Author:
In contrib I plan to replace this with Clear + EnsureCapacity so I don't have to manually check all usages.

Member:
It is not apparent from the setLabelsMap signature that labelsMap is empty. Looking at the setLabelsMap code alone, it is not possible to tell whether the code is correct; you have to analyse the call sites where setLabelsMap is used.

What do you think about calling Clear() here too (like you want to do in contrib), or declaring in the setLabelsMap doc comment that labelsMap must be empty? I think calling Clear() is fine; it should have very little cost.

Member Author:
Ok, just to make the reviewer happy :)

Member:
:-)

for i := 0; i < lablesCount; i++ {
if !ocLabelValues[i].GetHasValue() {
continue
@@ -280,7 +281,7 @@ func fillIntDataPoint(ocMetric *ocmetrics.Metric, dps pdata.IntDataPointSlice) {

dp.SetStartTimestamp(startTimestamp)
dp.SetTimestamp(pdata.TimestampFromTime(point.GetTimestamp().AsTime()))
setLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap())
fillLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap())
dp.SetValue(point.GetInt64Value())
}
}
@@ -308,7 +309,7 @@ func fillDoubleDataPoint(ocMetric *ocmetrics.Metric, dps pdata.DoubleDataPointSl

dp.SetStartTimestamp(startTimestamp)
dp.SetTimestamp(pdata.TimestampFromTime(point.GetTimestamp().AsTime()))
setLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap())
fillLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap())
dp.SetValue(point.GetDoubleValue())
}
}
@@ -336,7 +337,7 @@ func fillDoubleHistogramDataPoint(ocMetric *ocmetrics.Metric, dps pdata.Histogra

dp.SetStartTimestamp(startTimestamp)
dp.SetTimestamp(pdata.TimestampFromTime(point.GetTimestamp().AsTime()))
setLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap())
fillLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap())
distributionValue := point.GetDistributionValue()
dp.SetSum(distributionValue.GetSum())
dp.SetCount(uint64(distributionValue.GetCount()))
@@ -368,7 +369,7 @@ func fillDoubleSummaryDataPoint(ocMetric *ocmetrics.Metric, dps pdata.SummaryDat

dp.SetStartTimestamp(startTimestamp)
dp.SetTimestamp(pdata.TimestampFromTime(point.GetTimestamp().AsTime()))
setLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap())
fillLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap())
summaryValue := point.GetSummaryValue()
dp.SetSum(summaryValue.GetSum().GetValue())
dp.SetCount(uint64(summaryValue.GetCount().GetValue()))
@@ -414,12 +415,13 @@ func exemplarToMetrics(ocExemplar *ocmetrics.DistributionValue_Exemplar, exempla
if ocExemplar.GetTimestamp() != nil {
exemplar.SetTimestamp(pdata.TimestampFromTime(ocExemplar.GetTimestamp().AsTime()))
}
exemplar.SetValue(ocExemplar.GetValue())
attachments := exemplar.FilteredLabels()
ocAttachments := ocExemplar.GetAttachments()
attachments.InitEmptyWithCapacity(len(ocAttachments))
exemplar.SetValue(ocExemplar.GetValue())
filteredLabels := exemplar.FilteredLabels()
filteredLabels.Clear()
filteredLabels.EnsureCapacity(len(ocAttachments))
for k, v := range ocAttachments {
attachments.Upsert(k, v)
filteredLabels.Upsert(k, v)
}
}

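
Following the review thread above, which settled on calling `Clear()` inside `fillLabelsMap` so that correctness does not depend on the call site, here is a minimal sketch of the resulting Clear + EnsureCapacity fill pattern. The `fillLabels` helper and the label values are illustrative only and not code from this PR; the import path assumes the `consumer/pdata` layout at the time of this change.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

// fillLabels mirrors the pattern adopted in fillLabelsMap: reset the
// destination, pre-size it, then insert the new entries.
func fillLabels(dst pdata.StringMap, src map[string]string) {
	dst.Clear()                  // guarantees the fill starts from an empty map
	dst.EnsureCapacity(len(src)) // one allocation for the expected size
	for k, v := range src {
		dst.Upsert(k, v)
	}
}

func main() {
	// Pretend the destination map is being reused and still holds a stale entry.
	labels := pdata.NewStringMap().InitFromMap(map[string]string{"stale": "value"})
	fillLabels(labels, map[string]string{"env": "prod", "app": "myapp"})
	fmt.Println(labels.Len()) // 2; the stale entry is gone
}
```
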
3 changes: 2 additions & 1 deletion translator/internaldata/oc_to_resource.go
@@ -80,7 +80,8 @@ func ocNodeResourceToInternal(ocNode *occommon.Node, ocResource *ocresource.Reso
}

attrs := dest.Attributes()
attrs.InitEmptyWithCapacity(maxTotalAttrCount)
attrs.Clear()
attrs.EnsureCapacity(maxTotalAttrCount)

if ocNode != nil {
// Copy all Attributes.