diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go index b9121fb423c2..248c7ad92900 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go @@ -10,7 +10,7 @@ import ( // Config relating to Disk Metric Scraper. type Config struct { - // MetricsbuilderConfig allows to customize scraped metrics/attributes representation. + // MetricsBuilderConfig allows to customize scraped metrics/attributes representation. metadata.MetricsBuilderConfig `mapstructure:",squash"` // Include specifies a filter on the devices that should be included from the generated metrics. diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go index 9cd6ab272e96..f8ad60e8468f 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go @@ -81,12 +81,12 @@ func TestScrape(t *testing.T) { IncludeVirtualFS: true, IncludeFSTypes: FSTypeMatchConfig{Config: filterset.Config{MatchType: filterset.Strict}, FSTypes: []string{"tmpfs"}}, }, - partitionsFunc: func(_ context.Context, includeVirtual bool) (paritions []disk.PartitionStat, err error) { - paritions = append(paritions, disk.PartitionStat{Device: "root-device", Fstype: "ext4"}) + partitionsFunc: func(_ context.Context, includeVirtual bool) (partitions []disk.PartitionStat, err error) { + partitions = append(partitions, disk.PartitionStat{Device: "root-device", Fstype: "ext4"}) if includeVirtual { - paritions = append(paritions, disk.PartitionStat{Device: "shm", Fstype: "tmpfs"}) + partitions = append(partitions, disk.PartitionStat{Device: "shm", Fstype: "tmpfs"}) } - return paritions, err + return partitions, err }, usageFunc: func(context.Context, string) (*disk.UsageStat, error) { return &disk.UsageStat{}, nil @@ -267,7 +267,7 @@ func TestScrape(t *testing.T) { newErrRegex: "^error creating exclude_fs_types filter:", }, { - name: "Invalid Include Moountpoints Filter", + name: "Invalid Include Mountpoints Filter", config: Config{ MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), IncludeMountPoints: MountPointMatchConfig{MountPoints: []string{"test"}}, @@ -275,7 +275,7 @@ func TestScrape(t *testing.T) { newErrRegex: "^error creating include_mount_points filter:", }, { - name: "Invalid Exclude Moountpoints Filter", + name: "Invalid Exclude Mountpoints Filter", config: Config{ MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), ExcludeMountPoints: MountPointMatchConfig{MountPoints: []string{"test"}}, diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go index cb49f720fc66..be514dde50c8 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go @@ -72,7 +72,7 @@ func TestScrape(t *testing.T) { resultsMapLock := sync.Mutex{} testFn := func(t *testing.T, test testCase) { - // wait for messurement to start + // wait for measurement to start <-startChan scraper := newLoadScraper(context.Background(), scrapertest.NewNopSettings(), test.config) @@ -179,7 +179,7 
@@ func assertCompareAveragePerCPU(t *testing.T, average pmetric.Metric, standard p // For hardware with only 1 cpu, results must be very close assert.InDelta(t, valAverage, valStandard, 0.1) } else { - // For hardward with multiple CPU, average per cpu is fatally less than standard + // For hardware with multiple CPUs, average per cpu is necessarily less than standard assert.Less(t, valAverage, valStandard) } } diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go index 58e29f4982ce..0a73f7a36f93 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go @@ -45,7 +45,7 @@ func createMetricsScraper( cfg component.Config, ) (scraper.Metrics, error) { if runtime.GOOS != "linux" && runtime.GOOS != "windows" && runtime.GOOS != "darwin" { - return nil, errors.New("process scraper only available on Linux, Windows, or MacOS") + return nil, errors.New("process scraper only available on Linux, Windows, or macOS") } s, err := newProcessScraper(settings, cfg.(*Config)) diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/handlecount/handles_windows.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/handlecount/handles_windows.go index 7669d7515b05..3cd876b6cdab 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/handlecount/handles_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/handlecount/handles_windows.go @@ -13,7 +13,7 @@ import ( ) func NewManager() Manager { - return &handleCountManager{queryer: wmiHandleCountQueryer{}} + return &handleCountManager{querier: wmiHandleCountQuerier{}} } var ( @@ -21,17 +21,17 @@ var ( ErrNoHandleCountForProcess = errors.New("no handle count for process") ) -type handleCountQueryer interface { +type handleCountQuerier interface { queryProcessHandleCounts() (map[int64]uint32, error) } type handleCountManager struct { - queryer handleCountQueryer + querier handleCountQuerier handleCounts map[int64]uint32 } func (m *handleCountManager) Refresh() error { - handleCounts, err := m.queryer.queryProcessHandleCounts() + handleCounts, err := m.querier.queryProcessHandleCounts() if err != nil { return err } @@ -50,7 +50,7 @@ func (m *handleCountManager) GetProcessHandleCount(pid int64) (uint32, error) { return handleCount, nil } -type wmiHandleCountQueryer struct{} +type wmiHandleCountQuerier struct{} //revive:disable-next-line:var-naming type Win32_Process struct { @@ -58,7 +58,7 @@ type Win32_Process struct { HandleCount uint32 } -func (wmiHandleCountQueryer) queryProcessHandleCounts() (map[int64]uint32, error) { +func (wmiHandleCountQuerier) queryProcessHandleCounts() (map[int64]uint32, error) { handleCounts := []Win32_Process{} // creates query `get-wmiobject -query "select ProcessId, HandleCount from Win32_Process"` // based on reflection of Win32_Process type.
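The Queryer → Querier rename above covers the WMI-backed implementation whose context comment describes building the WQL query by struct reflection. As a rough, self-contained sketch of that mechanism — assuming the yusufpapurcu/wmi package and hypothetical standalone names, not the receiver's exact code — a querier like `wmiHandleCountQuerier` boils down to:

```go
//go:build windows

package main

import (
	"fmt"

	"github.com/yusufpapurcu/wmi" // assumed WQL client; the receiver may pin a different one
)

// Win32_Process mirrors the WMI class properties we select; the wmi
// package reflects over these field names to build the query string.
//
//revive:disable-next-line:var-naming
type Win32_Process struct {
	ProcessId   int64
	HandleCount uint32
}

// queryProcessHandleCounts runs one WQL query and indexes the results by PID.
func queryProcessHandleCounts() (map[int64]uint32, error) {
	var processes []Win32_Process
	// Produces `SELECT ProcessId, HandleCount FROM Win32_Process`,
	// the same query the context comment above spells out.
	query := wmi.CreateQuery(&processes, "")
	if err := wmi.Query(query, &processes); err != nil {
		return nil, err
	}
	counts := make(map[int64]uint32, len(processes))
	for _, p := range processes {
		counts[p.ProcessId] = p.HandleCount
	}
	return counts, nil
}

func main() {
	counts, err := queryProcessHandleCounts()
	if err != nil {
		panic(err)
	}
	fmt.Printf("tracked %d processes\n", len(counts))
}
```

Caching the resulting map, as `handleCountManager.Refresh` does in the hunk above, keeps `GetProcessHandleCount` lookups O(1) between refreshes.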
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/handlecount/handles_windows_test.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/handlecount/handles_windows_test.go index b757c83404bb..6cfc3f509f84 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/handlecount/handles_windows_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/handlecount/handles_windows_test.go @@ -35,16 +35,16 @@ func TestHandleCountManager(t *testing.T) { assert.Contains(t, err.Error(), "3") } -type mockQueryer struct { +type mockQuerier struct { info map[int64]uint32 } -func (s mockQueryer) queryProcessHandleCounts() (map[int64]uint32, error) { +func (s mockQuerier) queryProcessHandleCounts() (map[int64]uint32, error) { return s.info, nil } func deterministicManagerWithInfo(info map[int64]uint32) *handleCountManager { return &handleCountManager{ - queryer: mockQueryer{info: info}, + querier: mockQuerier{info: info}, } } diff --git a/receiver/hostmetricsreceiver/internal/scraper/systemscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/systemscraper/factory.go index 2937ee621391..2b33c04dc056 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/systemscraper/factory.go +++ b/receiver/hostmetricsreceiver/internal/scraper/systemscraper/factory.go @@ -33,7 +33,7 @@ func createMetricsScraper( cfg component.Config, ) (scraper.Metrics, error) { if runtime.GOOS != "linux" && runtime.GOOS != "windows" && runtime.GOOS != "darwin" { - return nil, errors.New("uptime scraper only available on Linux, Windows, or MacOS") + return nil, errors.New("uptime scraper only available on Linux, Windows, or macOS") } uptimeScraper := newUptimeScraper(ctx, settings, cfg.(*Config)) diff --git a/receiver/httpcheckreceiver/README.md b/receiver/httpcheckreceiver/README.md index d047236432bf..6a180acda839 100644 --- a/receiver/httpcheckreceiver/README.md +++ b/receiver/httpcheckreceiver/README.md @@ -13,7 +13,7 @@ [k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s -The HTTP Check Receiver can be used for synthethic checks against HTTP endpoints. This receiver will make a request to the specified `endpoint` using the +The HTTP Check Receiver can be used for synthetic checks against HTTP endpoints. This receiver will make a request to the specified `endpoint` using the configured `method`. This scraper generates a metric with a label for each HTTP response status class with a value of `1` if the status code matches the class. For example, the following metrics will be generated if the endpoint returned a `200`: diff --git a/receiver/huaweicloudcesreceiver/README.md b/receiver/huaweicloudcesreceiver/README.md index 28e006ebe991..a874006d42d0 100644 --- a/receiver/huaweicloudcesreceiver/README.md +++ b/receiver/huaweicloudcesreceiver/README.md @@ -38,9 +38,9 @@ The following settings are required: - `no_verify_ssl`: A boolean flag indicating whether SSL verification should be disabled. Set to True to disable SSL verification. -- `access_key`: The access key needed for CES authentification. Check `Huawei Cloud SDK Authentication Setup` section for more details. +- `access_key`: The access key needed for CES authentication. Check `Huawei Cloud SDK Authentication Setup` section for more details. -- `secret_key`: The secret key needed for CES authentification. Check `Huawei Cloud SDK Authentication Setup` section for more details. 
+- `secret_key`: The secret key needed for CES authentication. Check `Huawei Cloud SDK Authentication Setup` section for more details. The following settings are optional: diff --git a/receiver/huaweicloudcesreceiver/receiver.go b/receiver/huaweicloudcesreceiver/receiver.go index 52fe0cf3f9fb..28a9300d5e4e 100644 --- a/receiver/huaweicloudcesreceiver/receiver.go +++ b/receiver/huaweicloudcesreceiver/receiver.go @@ -170,7 +170,7 @@ func (rcvr *cesReceiver) listMetricDefinitions(ctx context.Context) ([]model.Met // listDataPoints retrieves data points for a list of metric definitions. // The function performs the following operations: -// 1. Generates a unique key for each metric definition (at least one dimenstion is required) and checks for duplicates. +// 1. Generates a unique key for each metric definition (at least one dimension is required) and checks for duplicates. // 2. Determines the time range (from-to) for fetching the metric data points, using the current timestamp // and the last-seen timestamp for each metric. // 3. Fetches data points for each metric definition. diff --git a/receiver/k8sclusterreceiver/informer_transform.go b/receiver/k8sclusterreceiver/informer_transform.go index 17486d4d8204..63fb388fde9d 100644 --- a/receiver/k8sclusterreceiver/informer_transform.go +++ b/receiver/k8sclusterreceiver/informer_transform.go @@ -8,7 +8,7 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/demonset" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/daemonset" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/deployment" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/jobs" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/node" @@ -33,7 +33,7 @@ func transformObject(object any) (any, error) { case *appsv1.Deployment: return deployment.Transform(o), nil case *appsv1.DaemonSet: - return demonset.Transform(o), nil + return daemonset.Transform(o), nil case *appsv1.StatefulSet: return statefulset.Transform(o), nil case *corev1.Service: diff --git a/receiver/k8sclusterreceiver/internal/collection/collector.go b/receiver/k8sclusterreceiver/internal/collection/collector.go index 8beff3ed03cb..3a939fa5f32d 100644 --- a/receiver/k8sclusterreceiver/internal/collection/collector.go +++ b/receiver/k8sclusterreceiver/internal/collection/collector.go @@ -17,7 +17,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/clusterresourcequota" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/cronjob" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/demonset" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/daemonset" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/deployment" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/gvk" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/hpa" @@ -88,7 +88,7 @@ func (dc *DataCollector) CollectMetricData(currentTime time.Time) pmetric.Metric replicaset.RecordMetrics(dc.metricsBuilder, o.(*appsv1.ReplicaSet), ts) }) 
dc.metadataStore.ForEach(gvk.DaemonSet, func(o any) { - demonset.RecordMetrics(dc.metricsBuilder, o.(*appsv1.DaemonSet), ts) + daemonset.RecordMetrics(dc.metricsBuilder, o.(*appsv1.DaemonSet), ts) }) dc.metadataStore.ForEach(gvk.StatefulSet, func(o any) { statefulset.RecordMetrics(dc.metricsBuilder, o.(*appsv1.StatefulSet), ts) diff --git a/receiver/k8sclusterreceiver/internal/demonset/daemonsets.go b/receiver/k8sclusterreceiver/internal/daemonset/daemonsets.go similarity index 93% rename from receiver/k8sclusterreceiver/internal/demonset/daemonsets.go rename to receiver/k8sclusterreceiver/internal/daemonset/daemonsets.go index da941cea39f9..16b51f083a2a 100644 --- a/receiver/k8sclusterreceiver/internal/demonset/daemonsets.go +++ b/receiver/k8sclusterreceiver/internal/daemonset/daemonsets.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package demonset // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/demonset" +package daemonset // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/daemonset" import ( "go.opentelemetry.io/collector/pdata/pcommon" diff --git a/receiver/k8sclusterreceiver/internal/demonset/daemonsets_test.go b/receiver/k8sclusterreceiver/internal/daemonset/daemonsets_test.go similarity index 99% rename from receiver/k8sclusterreceiver/internal/demonset/daemonsets_test.go rename to receiver/k8sclusterreceiver/internal/daemonset/daemonsets_test.go index edef8ae56d27..30e48cc00621 100644 --- a/receiver/k8sclusterreceiver/internal/demonset/daemonsets_test.go +++ b/receiver/k8sclusterreceiver/internal/daemonset/daemonsets_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package demonset +package daemonset import ( "path/filepath" diff --git a/receiver/k8sclusterreceiver/internal/demonset/package_test.go b/receiver/k8sclusterreceiver/internal/daemonset/package_test.go similarity index 91% rename from receiver/k8sclusterreceiver/internal/demonset/package_test.go rename to receiver/k8sclusterreceiver/internal/daemonset/package_test.go index cbb7f74afde5..cc766f8c7422 100644 --- a/receiver/k8sclusterreceiver/internal/demonset/package_test.go +++ b/receiver/k8sclusterreceiver/internal/daemonset/package_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package demonset +package daemonset import ( "testing" diff --git a/receiver/k8sclusterreceiver/internal/demonset/testdata/expected.yaml b/receiver/k8sclusterreceiver/internal/daemonset/testdata/expected.yaml similarity index 100% rename from receiver/k8sclusterreceiver/internal/demonset/testdata/expected.yaml rename to receiver/k8sclusterreceiver/internal/daemonset/testdata/expected.yaml diff --git a/receiver/k8sclusterreceiver/internal/pod/pods_test.go b/receiver/k8sclusterreceiver/internal/pod/pods_test.go index 805eb66d325e..07e8e00e5ecf 100644 --- a/receiver/k8sclusterreceiver/internal/pod/pods_test.go +++ b/receiver/k8sclusterreceiver/internal/pod/pods_test.go @@ -40,7 +40,7 @@ func TestPodAndContainerMetricsReportCPUMetrics(t *testing.T) { pod := testutils.NewPodWithContainer( "1", testutils.NewPodSpecWithContainer("container-name"), - testutils.NewPodStatusWithContainer("container-name", containerIDWithPreifx("container-id")), + testutils.NewPodStatusWithContainer("container-name", containerIDWithPrefix("container-id")), ) ts := pcommon.Timestamp(time.Now().UnixNano()) 
@@ -63,7 +63,7 @@ func TestPodStatusReasonAndContainerMetricsReportCPUMetrics(t *testing.T) { pod := testutils.NewPodWithContainer( "1", testutils.NewPodSpecWithContainer("container-name"), - testutils.NewEvictedTerminatedPodStatusWithContainer("container-name", containerIDWithPreifx("container-id")), + testutils.NewEvictedTerminatedPodStatusWithContainer("container-name", containerIDWithPrefix("container-id")), ) mbc := metadata.DefaultMetricsBuilderConfig() @@ -87,7 +87,7 @@ func TestPodStatusReasonAndContainerMetricsReportCPUMetrics(t *testing.T) { ) } -var containerIDWithPreifx = func(containerID string) string { +var containerIDWithPrefix = func(containerID string) string { return "docker://" + containerID } diff --git a/receiver/k8sclusterreceiver/receiver_test.go b/receiver/k8sclusterreceiver/receiver_test.go index 46623c0569e9..03d5fb9810d9 100644 --- a/receiver/k8sclusterreceiver/receiver_test.go +++ b/receiver/k8sclusterreceiver/receiver_test.go @@ -241,7 +241,7 @@ func TestReceiverWithMetadata(t *testing.T) { deletePods(t, client, 1) // Ensure ConsumeKubernetesMetadata is called twice, once for the add and - // then for the update. Note the second update does not result in metatada call + // then for the update. Note the second update does not result in metadata call // since the pod is not changed. require.Eventually(t, func() bool { return int(numCalls.Load()) == 2 diff --git a/receiver/k8sclusterreceiver/watcher.go b/receiver/k8sclusterreceiver/watcher.go index 240c4f9417b5..4912f056e48d 100644 --- a/receiver/k8sclusterreceiver/watcher.go +++ b/receiver/k8sclusterreceiver/watcher.go @@ -30,7 +30,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/cronjob" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/demonset" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/daemonset" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/deployment" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/gvk" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/hpa" @@ -305,7 +305,7 @@ func (rw *resourceWatcher) objMetadata(obj any) map[experimentalmetricmetadata.R case *appsv1.ReplicaSet: return replicaset.GetMetadata(o) case *appsv1.DaemonSet: - return demonset.GetMetadata(o) + return daemonset.GetMetadata(o) case *appsv1.StatefulSet: return statefulset.GetMetadata(o) case *batchv1.Job: diff --git a/receiver/k8sobjectsreceiver/receiver_test.go b/receiver/k8sobjectsreceiver/receiver_test.go index 23dadeacf51c..ee7a8a1ae2a3 100644 --- a/receiver/k8sobjectsreceiver/receiver_test.go +++ b/receiver/k8sobjectsreceiver/receiver_test.go @@ -149,7 +149,7 @@ func TestWatchObject(t *testing.T) { assert.NoError(t, r.Shutdown(ctx)) } -func TestExludeDeletedTrue(t *testing.T) { +func TestExcludeDeletedTrue(t *testing.T) { t.Parallel() mockClient := newMockDynamicClient() diff --git a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go index bca2ba2554fb..300dc0813c39 100644 --- a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go +++ 
b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go @@ -177,8 +177,8 @@ func TestUnstructuredListToLogData(t *testing.T) { k8sNamespace.Str(), ) - watchEvenLogRecordtAttrs := logEntryFromWatchEvent.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes() - eventType, ok := watchEvenLogRecordtAttrs.Get("event.name") + watchEventLogRecordAttrs := logEntryFromWatchEvent.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes() + eventType, ok := watchEventLogRecordAttrs.Get("event.name") assert.True(t, ok) assert.Equal( t, @@ -186,7 +186,7 @@ func TestUnstructuredListToLogData(t *testing.T) { eventType.AsString(), ) - eventDomain, ok := watchEvenLogRecordtAttrs.Get("event.domain") + eventDomain, ok := watchEventLogRecordAttrs.Get("event.domain") assert.True(t, ok) assert.Equal( t, @@ -194,7 +194,7 @@ func TestUnstructuredListToLogData(t *testing.T) { eventDomain.AsString(), ) - k8sResourceName, ok := watchEvenLogRecordtAttrs.Get("k8s.resource.name") + k8sResourceName, ok := watchEventLogRecordAttrs.Get("k8s.resource.name") assert.True(t, ok) assert.Equal( t, diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index c3537fafb823..f2848f29995e 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -224,7 +224,7 @@ log retention size of a topic in Bytes, The value (-1) indicates infinite size. ### kafka.topic.min_insync_replicas -minimum insync replicas of a topic. +minimum in-sync replicas of a topic. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go index 3135188a4017..4ea40bf79dc7 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go @@ -700,7 +700,7 @@ type metricKafkaTopicMinInsyncReplicas struct { // init fills kafka.topic.min_insync_replicas metric with initial data.
func (m *metricKafkaTopicMinInsyncReplicas) init() { m.data.SetName("kafka.topic.min_insync_replicas") - m.data.SetDescription("minimum insync replicas of a topic.") + m.data.SetDescription("minimum in-sync replicas of a topic.") m.data.SetUnit("{replicas}") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go index 8b45bd711f4e..d0dc07bc4e7f 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics_test.go @@ -386,7 +386,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["kafka.topic.min_insync_replicas"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "minimum insync replicas of a topic.", ms.At(i).Description()) + assert.Equal(t, "minimum in-sync replicas of a topic.", ms.At(i).Description()) assert.Equal(t, "{replicas}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) diff --git a/receiver/kafkametricsreceiver/metadata.yaml b/receiver/kafkametricsreceiver/metadata.yaml index bf47e7bf0277..9c861806dbcc 100644 --- a/receiver/kafkametricsreceiver/metadata.yaml +++ b/receiver/kafkametricsreceiver/metadata.yaml @@ -78,7 +78,7 @@ metrics: attributes: [topic] kafka.topic.min_insync_replicas: enabled: false - description: minimum insync replicas of a topic. + description: minimum in-sync replicas of a topic. unit: "{replicas}" gauge: value_type: int diff --git a/receiver/kafkametricsreceiver/scraper_test_helper.go b/receiver/kafkametricsreceiver/scraper_test_helper.go index 776b25352b55..161caa5120d9 100644 --- a/receiver/kafkametricsreceiver/scraper_test_helper.go +++ b/receiver/kafkametricsreceiver/scraper_test_helper.go @@ -203,9 +203,9 @@ func newMockClusterAdmin() *mockClusterAdmin { td[testTopic] = sarama.TopicDetail{ ReplicationFactor: testReplicationFactor, ConfigEntries: map[string]*string{ - minInsyncRelicas: &strMinInsyncReplicas, - retentionMs: &strLogRetentionMs, - retentionBytes: &strLogRetentionBytes, + minInsyncReplicas: &strMinInsyncReplicas, + retentionMs: &strLogRetentionMs, + retentionBytes: &strLogRetentionBytes, }, } clusterAdmin.topics = td diff --git a/receiver/kafkametricsreceiver/topic_scraper.go b/receiver/kafkametricsreceiver/topic_scraper.go index e569137f254b..eadf7f567a46 100644 --- a/receiver/kafkametricsreceiver/topic_scraper.go +++ b/receiver/kafkametricsreceiver/topic_scraper.go @@ -33,9 +33,9 @@ type topicScraper struct { } const ( - minInsyncRelicas = "min.insync.replicas" - retentionMs = "retention.ms" - retentionBytes = "retention.bytes" + minInsyncReplicas = "min.insync.replicas" + retentionMs = "retention.ms" + retentionBytes = "retention.bytes" ) func (s *topicScraper) shutdown(context.Context) error { @@ -125,7 +125,7 @@ func (s *topicScraper) scrapeTopicConfigs(now pcommon.Timestamp, errors scrapere if s.clusterAdmin == nil { admin, err := newClusterAdmin(s.config.Brokers, s.saramaConfig) if err != nil { - s.settings.Logger.Error("Error creating kafka client with admin priviledges", zap.Error(err)) + s.settings.Logger.Error("Error creating kafka client with admin privileges", zap.Error(err)) return } s.clusterAdmin = admin @@ -141,12 +141,12 @@ func (s *topicScraper) scrapeTopicConfigs(now pcommon.Timestamp, 
errors scrapere configEntries, _ := s.clusterAdmin.DescribeConfig(sarama.ConfigResource{ Type: sarama.TopicResource, Name: name, - ConfigNames: []string{minInsyncRelicas, retentionMs, retentionBytes}, + ConfigNames: []string{minInsyncReplicas, retentionMs, retentionBytes}, }) for _, config := range configEntries { switch config.Name { - case minInsyncRelicas: + case minInsyncReplicas: if val, err := strconv.Atoi(config.Value); err == nil { s.mb.RecordKafkaTopicMinInsyncReplicasDataPoint(now, int64(val), name) } else { diff --git a/receiver/kafkareceiver/README.md b/receiver/kafkareceiver/README.md index b7b25c4eadb6..49f3c4c0ce31 100644 --- a/receiver/kafkareceiver/README.md +++ b/receiver/kafkareceiver/README.md @@ -94,7 +94,7 @@ The following settings can be optionally configured: - `on_error`: (default = false) If false, only the successfully processed messages are marked **Note: this can block the entire partition in case a message processing returns a permanent error** - `header_extraction`: - - `extract_headers` (default = false): Allows user to attach header fields to resource attributes in otel piepline + - `extract_headers` (default = false): Allows user to attach header fields to resource attributes in otel pipeline - `headers` (default = []): List of headers they'd like to extract from kafka record. **Note: Matching pattern will be `exact`. Regexes are not supported as of now.** diff --git a/receiver/kafkareceiver/kafka_receiver_test.go b/receiver/kafkareceiver/kafka_receiver_test.go index 1f321162930d..ed16721be341 100644 --- a/receiver/kafkareceiver/kafka_receiver_test.go +++ b/receiver/kafkareceiver/kafka_receiver_test.go @@ -1083,7 +1083,7 @@ func TestLogsConsumerGroupHandler_unmarshal_text(t *testing.T) { enc string }{ { - name: "unmarshal test for Englist (ASCII characters) with text_utf8", + name: "unmarshal test for English (ASCII characters) with text_utf8", text: "ASCII characters test", enc: "utf8", }, diff --git a/receiver/kubeletstatsreceiver/README.md b/receiver/kubeletstatsreceiver/README.md index d91c83402749..44b8dbce0866 100644 --- a/receiver/kubeletstatsreceiver/README.md +++ b/receiver/kubeletstatsreceiver/README.md @@ -157,7 +157,7 @@ include the following: - `container.id` - to augment metrics with Container ID label obtained from container statuses exposed via `/pods`. - `k8s.volume.type` - to collect volume type from the Pod spec exposed via `/pods` and have it as a label on volume metrics. -If there's more information available from the endpoint than just volume type, those are sycned as well depending on +If there's more information available from the endpoint than just volume type, those are synced as well depending on the available fields and the type of volume. For example, `aws.volume.id` would be synced from `awsElasticBlockStore` and `gcp.pd.name` is synced for `gcePersistentDisk`. 
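Zooming out on the topic_scraper.go hunks above: the renamed `minInsyncReplicas` constant is the literal Kafka config key passed to `DescribeConfig`. A minimal sketch of that round trip — assuming the IBM/sarama import path, with a placeholder broker address and topic name — looks like:

```go
package main

import (
	"fmt"
	"log"
	"strconv"

	"github.com/IBM/sarama" // assumed fork; the receiver may pin a different sarama path
)

// minInsyncReplicas is the Kafka topic config key, matching the constant
// renamed in topic_scraper.go above.
const minInsyncReplicas = "min.insync.replicas"

func main() {
	// Placeholder broker and topic for illustration.
	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalf("error creating kafka client with admin privileges: %v", err)
	}
	defer admin.Close()

	// Ask the broker for just the config keys we care about.
	entries, err := admin.DescribeConfig(sarama.ConfigResource{
		Type:        sarama.TopicResource,
		Name:        "my-topic",
		ConfigNames: []string{minInsyncReplicas},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Config values come back as strings, hence the strconv.Atoi in the scraper.
	for _, entry := range entries {
		if entry.Name == minInsyncReplicas {
			if val, convErr := strconv.Atoi(entry.Value); convErr == nil {
				fmt.Printf("min in-sync replicas: %d\n", val)
			}
		}
	}
}
```

Note that the scraper itself discards the `DescribeConfig` error (the `configEntries, _ :=` in the hunk above), so a per-topic failure silently yields no config data points for that topic.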
diff --git a/receiver/kubeletstatsreceiver/factory.go b/receiver/kubeletstatsreceiver/factory.go index c2ea767bbe31..9b14fde54cd8 100644 --- a/receiver/kubeletstatsreceiver/factory.go +++ b/receiver/kubeletstatsreceiver/factory.go @@ -103,7 +103,7 @@ func createMetricsReceiver( } } - scrp, err := newKubletScraper(rest, set, rOptions, cfg.MetricsBuilderConfig, cfg.NodeName) + scrp, err := newKubeletScraper(rest, set, rOptions, cfg.MetricsBuilderConfig, cfg.NodeName) if err != nil { return nil, err } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go index fa35e320c2d0..846b1cc280b3 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go @@ -339,14 +339,14 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) { name: "glusterfs", vs: v1.VolumeSource{ Glusterfs: &v1.GlusterfsVolumeSource{ - EndpointsName: "endspoints_name", + EndpointsName: "endpoints_name", Path: "path", }, }, args: []string{"uid-1234", "k8s.volume.type"}, want: map[string]any{ "k8s.volume.type": "glusterfs", - "glusterfs.endpoints.name": "endspoints_name", + "glusterfs.endpoints.name": "endpoints_name", "glusterfs.path": "path", }, }, diff --git a/receiver/kubeletstatsreceiver/mocked_objects_test.go b/receiver/kubeletstatsreceiver/mocked_objects_test.go index 61797e1bc5c9..6144364b010f 100644 --- a/receiver/kubeletstatsreceiver/mocked_objects_test.go +++ b/receiver/kubeletstatsreceiver/mocked_objects_test.go @@ -13,7 +13,7 @@ import ( // getValidMockedObjects returns a list of volume claims and persistent // volume objects based on values present in testdata/pods.json. These -// values will be used to mock objects returned by the Kuberentes API. +// values will be used to mock objects returned by the Kubernetes API. 
func getValidMockedObjects() []runtime.Object { return []runtime.Object{ volumeClaim1, diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go index ce7c71730324..e19a37487c81 100644 --- a/receiver/kubeletstatsreceiver/scraper.go +++ b/receiver/kubeletstatsreceiver/scraper.go @@ -32,7 +32,7 @@ type scraperOptions struct { k8sAPIClient kubernetes.Interface } -type kubletScraper struct { +type kubeletScraper struct { statsProvider *kubelet.StatsProvider metadataProvider *kubelet.MetadataProvider logger *zap.Logger @@ -50,14 +50,14 @@ type kubletScraper struct { nodeLimits *kubelet.NodeCapacity } -func newKubletScraper( +func newKubeletScraper( restClient kubelet.RestClient, set receiver.Settings, rOptions *scraperOptions, metricsConfig metadata.MetricsBuilderConfig, nodeName string, ) (scraper.Metrics, error) { - ks := &kubletScraper{ + ks := &kubeletScraper{ statsProvider: kubelet.NewStatsProvider(restClient), metadataProvider: kubelet.NewMetadataProvider(restClient), logger: set.Logger, @@ -97,7 +97,7 @@ func newKubletScraper( ) } -func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) { +func (r *kubeletScraper) scrape(context.Context) (pmetric.Metrics, error) { summary, err := r.statsProvider.StatsSummary() if err != nil { r.logger.Error("call to /stats/summary endpoint failed", zap.Error(err)) @@ -129,7 +129,7 @@ func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) { return md, nil } -func (r *kubletScraper) detailedPVCLabelsSetter() func(rb *metadata.ResourceBuilder, volCacheID, volumeClaim, namespace string) error { +func (r *kubeletScraper) detailedPVCLabelsSetter() func(rb *metadata.ResourceBuilder, volCacheID, volumeClaim, namespace string) error { return func(rb *metadata.ResourceBuilder, volCacheID, volumeClaim, namespace string) error { if r.k8sAPIClient == nil { return nil @@ -160,13 +160,13 @@ func (r *kubletScraper) detailedPVCLabelsSetter() func(rb *metadata.ResourceBuil } } -func (r *kubletScraper) node() kubelet.NodeCapacity { +func (r *kubeletScraper) node() kubelet.NodeCapacity { r.m.RLock() defer r.m.RUnlock() return *r.nodeLimits } -func (r *kubletScraper) start(_ context.Context, _ component.Host) error { +func (r *kubeletScraper) start(_ context.Context, _ component.Host) error { if r.nodeInformer != nil { _, err := r.nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: r.handleNodeAdd, @@ -180,7 +180,7 @@ func (r *kubletScraper) start(_ context.Context, _ component.Host) error { return nil } -func (r *kubletScraper) shutdown(_ context.Context) error { +func (r *kubeletScraper) shutdown(_ context.Context) error { r.logger.Debug("executing close") if r.stopCh != nil { close(r.stopCh) @@ -188,7 +188,7 @@ func (r *kubletScraper) shutdown(_ context.Context) error { return nil } -func (r *kubletScraper) handleNodeAdd(obj any) { +func (r *kubeletScraper) handleNodeAdd(obj any) { if node, ok := obj.(*v1.Node); ok { r.addOrUpdateNode(node) } else { @@ -196,7 +196,7 @@ func (r *kubletScraper) handleNodeAdd(obj any) { } } -func (r *kubletScraper) handleNodeUpdate(_, newNode any) { +func (r *kubeletScraper) handleNodeUpdate(_, newNode any) { if node, ok := newNode.(*v1.Node); ok { r.addOrUpdateNode(node) } else { @@ -204,7 +204,7 @@ func (r *kubletScraper) handleNodeUpdate(_, newNode any) { } } -func (r *kubletScraper) addOrUpdateNode(node *v1.Node) { +func (r *kubeletScraper) addOrUpdateNode(node *v1.Node) { r.m.Lock() defer r.m.Unlock() diff --git 
a/receiver/kubeletstatsreceiver/scraper_test.go b/receiver/kubeletstatsreceiver/scraper_test.go index 8905ea9c015c..0b9694378a2e 100644 --- a/receiver/kubeletstatsreceiver/scraper_test.go +++ b/receiver/kubeletstatsreceiver/scraper_test.go @@ -55,7 +55,7 @@ func TestScraper(t *testing.T) { options := &scraperOptions{ metricGroupsToCollect: allMetricGroups, } - r, err := newKubletScraper( + r, err := newKubeletScraper( &fakeRestClient{}, receivertest.NewNopSettings(), options, @@ -105,7 +105,7 @@ func TestScraperWithCPUNodeUtilization(t *testing.T) { }, k8sAPIClient: client, } - r, err := newKubletScraper( + r, err := newKubeletScraper( &fakeRestClient{}, receivertest.NewNopSettings(), options, @@ -185,7 +185,7 @@ func TestScraperWithMemoryNodeUtilization(t *testing.T) { }, k8sAPIClient: client, } - r, err := newKubletScraper( + r, err := newKubeletScraper( &fakeRestClient{}, receivertest.NewNopSettings(), options, @@ -277,7 +277,7 @@ func TestScraperWithMetadata(t *testing.T) { extraMetadataLabels: tt.metadataLabels, metricGroupsToCollect: tt.metricGroups, } - r, err := newKubletScraper( + r, err := newKubeletScraper( &fakeRestClient{}, receivertest.NewNopSettings(), options, @@ -469,7 +469,7 @@ func TestScraperWithPercentMetrics(t *testing.T) { }, ResourceAttributes: metadata.DefaultResourceAttributesConfig(), } - r, err := newKubletScraper( + r, err := newKubeletScraper( &fakeRestClient{}, receivertest.NewNopSettings(), options, @@ -545,7 +545,7 @@ func TestScraperWithMetricGroups(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - r, err := newKubletScraper( + r, err := newKubeletScraper( &fakeRestClient{}, receivertest.NewNopSettings(), &scraperOptions{ @@ -627,7 +627,7 @@ func TestScraperWithPVCDetailedLabels(t *testing.T) { dataLen: numVolumes, }, { - name: "pvc_doesnot_exist", + name: "nonexistent_pvc", k8sAPIClient: fake.NewSimpleClientset(), dataLen: numVolumes - 3, volumeClaimsToMiss: map[string]bool{ @@ -707,7 +707,7 @@ func TestScraperWithPVCDetailedLabels(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - r, err := newKubletScraper( + r, err := newKubeletScraper( &fakeRestClient{}, receivertest.NewNopSettings(), &scraperOptions{ @@ -795,7 +795,7 @@ func TestClientErrors(t *testing.T) { extraMetadataLabels: test.extraMetadataLabels, metricGroupsToCollect: test.metricGroupsToCollect, } - r, err := newKubletScraper( + r, err := newKubeletScraper( &fakeRestClient{ statsSummaryFail: test.statsSummaryFail, podsFail: test.podsFail, diff --git a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_pvc_labels_pvc_doesnot_exist_expected.yaml b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_pvc_labels_nonexistent_pvc_expected.yaml similarity index 100% rename from receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_pvc_labels_pvc_doesnot_exist_expected.yaml rename to receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_pvc_labels_nonexistent_pvc_expected.yaml
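A closing note on the testdata rename at the end: golden-file tests typically derive the expected-output path from the test case name, which is why renaming the `pvc_doesnot_exist` case to `nonexistent_pvc` has to rename the YAML fixture in lockstep. A hypothetical helper (illustrative only; the real test may build the path inline) makes that coupling explicit:

```go
package kubeletstatsreceiver

import (
	"fmt"
	"path/filepath"
)

// expectedFilePath maps a test case name to its golden file, e.g.
// "nonexistent_pvc" ->
// "testdata/scraper/test_scraper_with_pvc_labels_nonexistent_pvc_expected.yaml".
func expectedFilePath(testName string) string {
	return filepath.Join("testdata", "scraper",
		fmt.Sprintf("test_scraper_with_pvc_labels_%s_expected.yaml", testName))
}
```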