diff --git a/.pylintrc b/.pylintrc
index 8f29b634f1b..b23bef8e66b 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -5,11 +5,11 @@
# run arbitrary code.
extension-pkg-whitelist=
-# Add files or directories to the blacklist. They should be base names, not
+# Add list of files or directories to be excluded. They should be base names, not
# paths.
ignore=CVS,gen,proto
-# Add files or directories matching the regex patterns to the blacklist. The
+# Add files or directories matching the regex patterns to be excluded. The
# regex matches against base names, not paths.
ignore-patterns=
diff --git a/README.md b/README.md
index cc6b7f10b66..ca9fd652ac9 100644
--- a/README.md
+++ b/README.md
@@ -36,11 +36,13 @@
---
-## About this project
+## OpenTelemetry Python
-The Python [OpenTelemetry](https://opentelemetry.io/) client.
+The Python [OpenTelemetry](https://opentelemetry.io/) implementation.
-## Installation
+## Getting started
+
+OpenTelemetry's goal is to provide a single set of APIs to capture distributed traces and metrics from your application and send them to an observability platform. This project allows you to do just that for applications written in Python.
This repository includes multiple installable packages. The `opentelemetry-api`
package includes abstract classes and no-op implementations that comprise the OpenTelemetry API following
@@ -52,9 +54,6 @@ Libraries that produce telemetry data should only depend on `opentelemetry-api`,
and defer the choice of the SDK to the application developer. Applications may
depend on `opentelemetry-sdk` or another package that implements the API.
-**Please note** that this library is currently in _beta_, and shouldn't
-generally be used in production environments.
-
The API and SDK packages are available on PyPI, and can installed via `pip`:
```sh
@@ -94,15 +93,11 @@ The online documentation is available at https://opentelemetry-python.readthedoc
if you want to access the documentation for the latest version use
https://opentelemetry-python.readthedocs.io/en/latest/.
-## Compatible Exporters
-
-See the [OpenTelemetry registry](https://opentelemetry.io/registry/?s=python) for a list of exporters available.
-
## Contributing
-See [CONTRIBUTING.md](CONTRIBUTING.md)
+See [CONTRIBUTING.md](CONTRIBUTING.md).
-We meet weekly on Thursday, and the time of the meeting alternates between 9AM PT and 4PM PT. The meeting is subject to change depending on contributors' availability. Check the [OpenTelemetry community calendar](https://calendar.google.com/calendar/embed?src=google.com_b79e3e90j7bbsa2n2p5an5lf60%40group.calendar.google.com) for specific dates.
+We meet weekly on Thursday at 9AM PT. The meeting is subject to change depending on contributors' availability. Check the [OpenTelemetry community calendar](https://calendar.google.com/calendar/embed?src=google.com_b79e3e90j7bbsa2n2p5an5lf60%40group.calendar.google.com) for specific dates.
Meetings take place via [Zoom video conference](https://zoom.us/j/6729396170).
@@ -134,78 +129,7 @@ Maintainers ([@open-telemetry/python-maintainers](https://github.com/orgs/open-t
-## Release Schedule
-
-OpenTelemetry Python is under active development.
-
-The library is not yet _generally available_, and releases aren't guaranteed to
-conform to a specific version of the specification. Future releases will not
-attempt to maintain backwards compatibility with previous releases. Each alpha
-and beta release includes significant changes to the API and SDK packages,
-making them incompatible with each other.
-
-The [v0.1 alpha
-release](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v0.1.0)
-includes:
-
-- Tracing API
-- Tracing SDK
-- Metrics API
-- Metrics SDK (Partial)
-- W3C Trace Context Propagation
-- B3 Context Propagation
-- HTTP Integrations
-
-The [v0.2 alpha
-release](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v0.2.0)
-includes:
-
-- OpenTracing Bridge
-- Jaeger Trace Exporter
-- Trace Sampling
-
-The [v0.3 alpha
-release](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v0.3.0)
-includes:
-
-- Metrics Instruments and Labels
-- Flask Integration
-- PyMongo Integration
-
-The [v0.4 alpha
-release](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v0.4.0)
-includes:
-
-- Metrics MinMaxSumCount Aggregator
-- Context API
-- Full Metrics SDK Pipeline
-- Metrics STDOUT Exporter
-- Dbapi2 Integration
-- MySQL Integration
-- Psycopg2 Integration
-- Zipkin Exporter
-- Prometheus Metrics Exporter
-- New Examples and Improvements to Existing Examples
-
-The [v0.5 beta
-release](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v0.5.0)
-includes:
-
-- W3C Correlation Context Propagation
-- OpenTelemetry Collector Exporter Integration for both metrics and traces
-- Metrics SDK
-- Global configuration module
-- Documentation improvements
-
-The [v0.6 beta
-release](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v0.6.0)
-includes:
-
-- API changes and bugfixes
-- An autoinstrumentation package and updated Flask instrumentation
-- gRPC integration
-
-See the [project
-milestones](https://github.com/open-telemetry/opentelemetry-python/milestones)
-for details on upcoming releases. The dates and features described in issues
+## Project Status
+
+Project [boards](https://github.com/open-telemetry/opentelemetry-python/projects) and [milestones](https://github.com/open-telemetry/opentelemetry-python/milestones) can be found at the respective links. We try to keep these accurate and should be the best place to go for answers on project status. The dates and features described in issues
and milestones are estimates, and subject to change.
diff --git a/dev-requirements.txt b/dev-requirements.txt
index dd4d2e37c97..6537a9a4448 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -1,5 +1,5 @@
pylint==2.4.4
-flake8==3.7.9
+flake8~=3.7
isort~=4.3
black>=19.3b0,==19.*
mypy==0.770
diff --git a/docs/api/api.rst b/docs/api/api.rst
index e1e82114296..ec6d8b03aa3 100644
--- a/docs/api/api.rst
+++ b/docs/api/api.rst
@@ -6,8 +6,8 @@ OpenTelemetry Python API
.. toctree::
:maxdepth: 1
+ baggage
configuration
context
- correlationcontext
metrics
trace
diff --git a/docs/api/baggage.propagation.rst b/docs/api/baggage.propagation.rst
new file mode 100644
index 00000000000..7c8eba79407
--- /dev/null
+++ b/docs/api/baggage.propagation.rst
@@ -0,0 +1,7 @@
+opentelemetry.baggage.propagation package
+====================================================
+
+Module contents
+---------------
+
+.. automodule:: opentelemetry.baggage.propagation
diff --git a/docs/api/baggage.rst b/docs/api/baggage.rst
new file mode 100644
index 00000000000..34712e78bd8
--- /dev/null
+++ b/docs/api/baggage.rst
@@ -0,0 +1,14 @@
+opentelemetry.baggage package
+========================================
+
+Subpackages
+-----------
+
+.. toctree::
+
+ baggage.propagation
+
+Module contents
+---------------
+
+.. automodule:: opentelemetry.baggage
diff --git a/docs/api/correlationcontext.propagation.rst b/docs/api/correlationcontext.propagation.rst
deleted file mode 100644
index a9b94aa4fb4..00000000000
--- a/docs/api/correlationcontext.propagation.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-opentelemetry.correlationcontext.propagation package
-====================================================
-
-Module contents
----------------
-
-.. automodule:: opentelemetry.correlationcontext.propagation
diff --git a/docs/api/correlationcontext.rst b/docs/api/correlationcontext.rst
deleted file mode 100644
index 10e7b2e573b..00000000000
--- a/docs/api/correlationcontext.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-opentelemetry.correlationcontext package
-========================================
-
-Subpackages
------------
-
-.. toctree::
-
- correlationcontext.propagation
-
-Module contents
----------------
-
-.. automodule:: opentelemetry.correlationcontext
diff --git a/docs/api/trace.rst b/docs/api/trace.rst
index 411e31023ec..65d9b4d8c88 100644
--- a/docs/api/trace.rst
+++ b/docs/api/trace.rst
@@ -6,7 +6,6 @@ Submodules
.. toctree::
- trace.sampling
trace.status
trace.span
diff --git a/docs/api/trace.sampling.rst b/docs/api/trace.sampling.rst
deleted file mode 100644
index 6280fd1d11b..00000000000
--- a/docs/api/trace.sampling.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Sampling Traces
-===============
-
-.. automodule:: opentelemetry.trace.sampling
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/docs/conf.py b/docs/conf.py
index 4b9753c96c4..d15d8b2ed5b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -103,17 +103,14 @@
# with "class reference target not found: ObjectProxy".
("py:class", "ObjectProxy"),
# TODO: Understand why sphinx is not able to find this local class
- (
- "py:class",
- "opentelemetry.trace.propagation.httptextformat.HTTPTextFormat",
- ),
+ ("py:class", "opentelemetry.trace.propagation.textmap.TextMapPropagator",),
(
"any",
- "opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.extract",
+ "opentelemetry.trace.propagation.textmap.TextMapPropagator.extract",
),
(
"any",
- "opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.inject",
+ "opentelemetry.trace.propagation.textmap.TextMapPropagator.inject",
),
]
diff --git a/docs/examples/datadog_exporter/server.py b/docs/examples/datadog_exporter/server.py
index 15d10f34934..9c83de8bb87 100644
--- a/docs/examples/datadog_exporter/server.py
+++ b/docs/examples/datadog_exporter/server.py
@@ -35,19 +35,19 @@
)
# append Datadog format for propagation to and from Datadog instrumented services
-global_httptextformat = propagators.get_global_httptextformat()
+global_textmap = propagators.get_global_textmap()
if isinstance(
- global_httptextformat, propagators.composite.CompositeHTTPPropagator
+ global_textmap, propagators.composite.CompositeHTTPPropagator
) and not any(
- isinstance(p, DatadogFormat) for p in global_httptextformat._propagators
+ isinstance(p, DatadogFormat) for p in global_textmap._propagators
):
- propagators.set_global_httptextformat(
+ propagators.set_global_textmap(
propagators.composite.CompositeHTTPPropagator(
- global_httptextformat._propagators + [DatadogFormat()]
+ global_textmap._propagators + [DatadogFormat()]
)
)
else:
- propagators.set_global_httptextformat(DatadogFormat())
+ propagators.set_global_textmap(DatadogFormat())
tracer = trace.get_tracer(__name__)
diff --git a/docs/faq-and-cookbook.rst b/docs/faq-and-cookbook.rst
new file mode 100644
index 00000000000..c7f630fce78
--- /dev/null
+++ b/docs/faq-and-cookbook.rst
@@ -0,0 +1,27 @@
+Frequently Asked Questions and Cookbook
+=======================================
+
+This page answers frequently asked questions, and serves as a cookbook
+for common scenarios.
+
+Create a new span
+-----------------
+
+.. code-block:: python
+
+ from opentelemetry import trace
+
+ tracer = trace.get_tracer(__name__)
+ with tracer.start_as_current_span("print") as span:
+ print("foo")
+ span.set_attribute("printed_string", "foo")
+
+Getting and modifying a span
+----------------------------
+
+.. code-block:: python
+
+ from opentelemetry import trace
+
+ current_span = trace.get_current_span()
+ current_span.set_attribute("hometown", "seattle")
diff --git a/docs/getting-started.rst b/docs/getting-started.rst
index 8c27ddfa4d3..213989fbe56 100644
--- a/docs/getting-started.rst
+++ b/docs/getting-started.rst
@@ -174,7 +174,7 @@ Now run the above script, hit the root url (http://localhost:5000/) a few times,
python flask_example.py
-Configure Your HTTP Propagator (b3, CorrelationContext)
+Configure Your HTTP Propagator (b3, Baggage)
-------------------------------------------------------
A major feature of distributed tracing is the ability to correlate a trace across
@@ -194,7 +194,7 @@ an example using Zipkin's `b3 propagation `_ client.
This documentation describes the :doc:`opentelemetry-api `,
:doc:`opentelemetry-sdk `, and several `integration packages <#integrations>`_.
-**Please note** that this library is currently in alpha, and shouldn't be
-used in production environments.
+**Please note** that this library is currently in _beta_, and shouldn't
+generally be used in production environments.
Installation
------------
@@ -62,6 +62,7 @@ install
:name: getting-started
getting-started
+ faq-and-cookbook
.. toctree::
:maxdepth: 1
diff --git a/docs/instrumentation/asgi/asgi.rst b/docs/instrumentation/asgi/asgi.rst
index abb1621973c..b988e4de430 100644
--- a/docs/instrumentation/asgi/asgi.rst
+++ b/docs/instrumentation/asgi/asgi.rst
@@ -1,8 +1,7 @@
-OpenTelemetry asgi Instrumentation
-===================================
+.. include:: ../../../instrumentation/opentelemetry-instrumentation-asgi/README.rst
-Module contents
----------------
+API
+---
.. automodule:: opentelemetry.instrumentation.asgi
:members:
diff --git a/docs/sdk/trace.rst b/docs/sdk/trace.rst
index ce06fb4abb0..0b53444e3bf 100644
--- a/docs/sdk/trace.rst
+++ b/docs/sdk/trace.rst
@@ -7,6 +7,7 @@ Submodules
.. toctree::
trace.export
+ trace.sampling
util.instrumentation
.. automodule:: opentelemetry.sdk.trace
diff --git a/docs/sdk/trace.sampling.rst b/docs/sdk/trace.sampling.rst
new file mode 100644
index 00000000000..f9c2fffa253
--- /dev/null
+++ b/docs/sdk/trace.sampling.rst
@@ -0,0 +1,7 @@
+opentelemetry.sdk.trace.sampling
+==========================================
+
+.. automodule:: opentelemetry.sdk.trace.sampling
+ :members:
+ :undoc-members:
+ :show-inheritance:
\ No newline at end of file
diff --git a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/__init__.py b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/__init__.py
index 5a73c55d69f..3294ba4e4e0 100644
--- a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/__init__.py
+++ b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/__init__.py
@@ -50,7 +50,7 @@
trace.get_tracer_provider().add_span_processor(span_processor)
# Optional: use Datadog format for propagation in distributed traces
- propagators.set_global_httptextformat(DatadogFormat())
+ propagators.set_global_textmap(DatadogFormat())
with tracer.start_as_current_span("foo"):
print("Hello world!")
diff --git a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py
index 49dab7c6866..37c78187f8e 100644
--- a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py
+++ b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py
@@ -21,6 +21,7 @@
from ddtrace.span import Span as DatadogSpan
import opentelemetry.trace as trace_api
+from opentelemetry.sdk.trace import sampling
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
from opentelemetry.trace.status import StatusCanonicalCode
@@ -246,7 +247,7 @@ def _get_sampling_rate(span):
return (
span.sampler.rate
if ctx.trace_flags.sampled
- and isinstance(span.sampler, trace_api.sampling.ProbabilitySampler)
+ and isinstance(span.sampler, sampling.TraceIdRatioBased)
else None
)
diff --git a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/propagator.py b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/propagator.py
index 5935557de8d..d2e60476e68 100644
--- a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/propagator.py
+++ b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/propagator.py
@@ -17,18 +17,18 @@
from opentelemetry import trace
from opentelemetry.context import Context
from opentelemetry.trace import get_current_span, set_span_in_context
-from opentelemetry.trace.propagation.httptextformat import (
+from opentelemetry.trace.propagation.textmap import (
Getter,
- HTTPTextFormat,
- HTTPTextFormatT,
Setter,
+ TextMapPropagator,
+ TextMapPropagatorT,
)
# pylint:disable=relative-beyond-top-level
from . import constants
-class DatadogFormat(HTTPTextFormat):
+class DatadogFormat(TextMapPropagator):
"""Propagator for the Datadog HTTP header format.
"""
@@ -39,8 +39,8 @@ class DatadogFormat(HTTPTextFormat):
def extract(
self,
- get_from_carrier: Getter[HTTPTextFormatT],
- carrier: HTTPTextFormatT,
+ get_from_carrier: Getter[TextMapPropagatorT],
+ carrier: TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> Context:
trace_id = extract_first_element(
@@ -81,8 +81,8 @@ def extract(
def inject(
self,
- set_in_carrier: Setter[HTTPTextFormatT],
- carrier: HTTPTextFormatT,
+ set_in_carrier: Setter[TextMapPropagatorT],
+ carrier: TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> None:
span = get_current_span(context)
@@ -120,8 +120,8 @@ def format_span_id(span_id: int) -> str:
def extract_first_element(
- items: typing.Iterable[HTTPTextFormatT],
-) -> typing.Optional[HTTPTextFormatT]:
+ items: typing.Iterable[TextMapPropagatorT],
+) -> typing.Optional[TextMapPropagatorT]:
if items is None:
return None
return next(iter(items), None)
diff --git a/exporter/opentelemetry-exporter-datadog/tests/test_datadog_exporter.py b/exporter/opentelemetry-exporter-datadog/tests/test_datadog_exporter.py
index 45ce9417e1a..73c8cb3bf82 100644
--- a/exporter/opentelemetry-exporter-datadog/tests/test_datadog_exporter.py
+++ b/exporter/opentelemetry-exporter-datadog/tests/test_datadog_exporter.py
@@ -23,6 +23,7 @@
from opentelemetry import trace as trace_api
from opentelemetry.exporter import datadog
from opentelemetry.sdk import trace
+from opentelemetry.sdk.trace import sampling
from opentelemetry.sdk.util.instrumentation import InstrumentationInfo
@@ -497,7 +498,7 @@ def test_sampling_rate(self):
is_remote=False,
trace_flags=trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED),
)
- sampler = trace_api.sampling.ProbabilitySampler(0.5)
+ sampler = sampling.TraceIdRatioBased(0.5)
span = trace.Span(
name="sampled", context=context, parent=None, sampler=sampler
diff --git a/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/__init__.py b/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/__init__.py
index afa0b2578f7..b998a6aecf1 100644
--- a/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/__init__.py
+++ b/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/__init__.py
@@ -168,7 +168,8 @@ def export(self, spans):
if self.collector is not None:
self.collector.submit(batch)
- self.agent_client.emit(batch)
+ else:
+ self.agent_client.emit(batch)
return SpanExportResult.SUCCESS
@@ -203,7 +204,7 @@ def _translate_to_jaeger(spans: Span):
parent_id = span.parent.span_id if span.parent else 0
tags = _extract_tags(span.attributes)
- tags.extend(_extract_tags(span.resource.labels))
+ tags.extend(_extract_tags(span.resource.attributes))
tags.extend(
[
diff --git a/exporter/opentelemetry-exporter-jaeger/tests/test_jaeger_exporter.py b/exporter/opentelemetry-exporter-jaeger/tests/test_jaeger_exporter.py
index 7b3916c5b95..bb852a07988 100644
--- a/exporter/opentelemetry-exporter-jaeger/tests/test_jaeger_exporter.py
+++ b/exporter/opentelemetry-exporter-jaeger/tests/test_jaeger_exporter.py
@@ -207,7 +207,7 @@ def test_translate_to_jaeger(self):
otel_spans[0].set_attribute("key_float", 111.22)
otel_spans[0].set_attribute("key_tuple", ("tuple_element",))
otel_spans[0].resource = Resource(
- labels={"key_resource": "some_resource"}
+ attributes={"key_resource": "some_resource"}
)
otel_spans[0].set_status(
Status(StatusCanonicalCode.UNKNOWN, "Example description")
@@ -215,9 +215,11 @@ def test_translate_to_jaeger(self):
otel_spans[0].end(end_time=end_times[0])
otel_spans[1].start(start_time=start_times[1])
+ otel_spans[1].resource = Resource({})
otel_spans[1].end(end_time=end_times[1])
otel_spans[2].start(start_time=start_times[2])
+ otel_spans[2].resource = Resource({})
otel_spans[2].end(end_time=end_times[2])
# pylint: disable=protected-access
@@ -367,7 +369,7 @@ def test_export(self):
exporter._collector = collector_mock
exporter.export((self._test_span,))
- self.assertEqual(agent_client_mock.emit.call_count, 2)
+ self.assertEqual(agent_client_mock.emit.call_count, 1)
self.assertEqual(collector_mock.submit.call_count, 1)
def test_agent_client(self):
diff --git a/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/metrics_exporter/__init__.py b/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/metrics_exporter/__init__.py
index 76986a8a59d..204a7c5476d 100644
--- a/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/metrics_exporter/__init__.py
+++ b/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/metrics_exporter/__init__.py
@@ -191,19 +191,19 @@ def get_collector_point(metric_record: MetricRecord) -> metrics_pb2.Point:
def get_resource(metric_record: MetricRecord) -> resource_pb2.Resource:
- resource_labels = metric_record.instrument.meter.resource.labels
+ resource_attributes = metric_record.instrument.meter.resource.attributes
return resource_pb2.Resource(
- type=infer_oc_resource_type(resource_labels),
- labels={k: str(v) for k, v in resource_labels.items()},
+ type=infer_oc_resource_type(resource_attributes),
+ labels={k: str(v) for k, v in resource_attributes.items()},
)
-def infer_oc_resource_type(resource_labels: Dict[str, str]) -> str:
+def infer_oc_resource_type(resource_attributes: Dict[str, str]) -> str:
"""Convert from OT resource labels to OC resource type"""
for (
ot_resource_key,
oc_resource_type,
) in _OT_LABEL_PRESENCE_TO_RESOURCE_TYPE:
- if ot_resource_key in resource_labels:
+ if ot_resource_key in resource_attributes:
return oc_resource_type
return ""
diff --git a/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py b/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py
index a078cb7ccc9..a4d8f46d4c2 100644
--- a/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py
+++ b/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py
@@ -36,7 +36,7 @@
# Resource can be required for some backends, e.g. Jaeger
# If resource wouldn't be set - traces wouldn't appears in Jaeger
- resource = Resource(labels=labels={
+ resource = Resource(attributes={
"service.name": "service"
})
diff --git a/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/exporter.py b/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/exporter.py
index 0ce7ef66177..7cd9f905e06 100644
--- a/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/exporter.py
+++ b/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/exporter.py
@@ -76,7 +76,7 @@ def _get_resource_data(
collector_resource = Resource()
- for key, value in sdk_resource.labels.items():
+ for key, value in sdk_resource.attributes.items():
try:
# pylint: disable=no-member
@@ -186,7 +186,7 @@ def _export(self, data):
if error.code() == StatusCode.OK:
return self._result.SUCCESS
- return self.result.FAILURE
+ return self._result.FAILURE
return self._result.FAILURE
diff --git a/exporter/opentelemetry-exporter-otlp/tests/test_otlp_trace_exporter.py b/exporter/opentelemetry-exporter-otlp/tests/test_otlp_trace_exporter.py
index b0ec8e4517e..a7f572323e0 100644
--- a/exporter/opentelemetry-exporter-otlp/tests/test_otlp_trace_exporter.py
+++ b/exporter/opentelemetry-exporter-otlp/tests/test_otlp_trace_exporter.py
@@ -87,6 +87,14 @@ def Export(self, request, context):
return ExportTraceServiceResponse()
+class TraceServiceServicerALREADY_EXISTS(TraceServiceServicer):
+ # pylint: disable=invalid-name,unused-argument,no-self-use
+ def Export(self, request, context):
+ context.set_code(StatusCode.ALREADY_EXISTS)
+
+ return ExportTraceServiceResponse()
+
+
class TestOTLPSpanExporter(TestCase):
def setUp(self):
tracer_provider = TracerProvider()
@@ -178,6 +186,14 @@ def test_success(self):
self.exporter.export([self.span]), SpanExportResult.SUCCESS
)
+ def test_failure(self):
+ add_TraceServiceServicer_to_server(
+ TraceServiceServicerALREADY_EXISTS(), self.server
+ )
+ self.assertEqual(
+ self.exporter.export([self.span]), SpanExportResult.FAILURE
+ )
+
def test_translate_spans(self):
expected = ExportTraceServiceRequest(
diff --git a/exporter/opentelemetry-exporter-zipkin/CHANGELOG.md b/exporter/opentelemetry-exporter-zipkin/CHANGELOG.md
index 56678833143..b1066d081a5 100644
--- a/exporter/opentelemetry-exporter-zipkin/CHANGELOG.md
+++ b/exporter/opentelemetry-exporter-zipkin/CHANGELOG.md
@@ -2,6 +2,12 @@
## Unreleased
+- Add support for OTEL_EXPORTER_ZIPKIN_ENDPOINT env var. As part of this change, the
+ configuration of the ZipkinSpanExporter exposes a `url` argument to replace `host_name`,
+ `port`, `protocol`, `endpoint`. This brings this implementation inline with other
+ implementations.
+ ([#1064](https://github.com/open-telemetry/opentelemetry-python/pull/1064))
+
## Version 0.12b0
Released 2020-08-14
diff --git a/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py b/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py
index b0eb1bce0f1..6b3ce2df9e8 100644
--- a/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py
+++ b/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py
@@ -24,6 +24,7 @@
.. _Zipkin: https://zipkin.io/
.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/
+.. _Specification: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/sdk-environment-variables.md#zipkin-exporter
.. code:: python
@@ -39,10 +40,7 @@
zipkin_exporter = zipkin.ZipkinSpanExporter(
service_name="my-helloworld-service",
# optional:
- # host_name="localhost",
- # port=9411,
- # endpoint="/api/v2/spans",
- # protocol="http",
+ # url="http://localhost:9411/api/v2/spans",
# ipv4="",
# ipv6="",
# retry=False,
@@ -57,24 +55,25 @@
with tracer.start_as_current_span("foo"):
print("Hello world!")
+The exporter supports endpoint configuration via the OTEL_EXPORTER_ZIPKIN_ENDPOINT environment variables as defined in the `Specification`_
+
API
---
"""
import json
import logging
+import os
from typing import Optional, Sequence
+from urllib.parse import urlparse
import requests
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
from opentelemetry.trace import Span, SpanContext, SpanKind
-DEFAULT_ENDPOINT = "/api/v2/spans"
-DEFAULT_HOST_NAME = "localhost"
-DEFAULT_PORT = 9411
-DEFAULT_PROTOCOL = "http"
DEFAULT_RETRY = False
+DEFAULT_URL = "http://localhost:9411/api/v2/spans"
ZIPKIN_HEADERS = {"Content-Type": "application/json"}
SPAN_KIND_MAP = {
@@ -96,10 +95,7 @@ class ZipkinSpanExporter(SpanExporter):
Args:
service_name: Service that logged an annotation in a trace.Classifier
when query for spans.
- host_name: The host name of the Zipkin server
- port: The port of the Zipkin server
- endpoint: The endpoint of the Zipkin server
- protocol: The protocol used for the request.
+ url: The Zipkin endpoint URL
ipv4: Primary IPv4 address associated with this connection.
ipv6: Primary IPv6 address associated with this connection.
retry: Set to True to configure the exporter to retry on failure.
@@ -108,22 +104,21 @@ class ZipkinSpanExporter(SpanExporter):
def __init__(
self,
service_name: str,
- host_name: str = DEFAULT_HOST_NAME,
- port: int = DEFAULT_PORT,
- endpoint: str = DEFAULT_ENDPOINT,
- protocol: str = DEFAULT_PROTOCOL,
+ url: str = None,
ipv4: Optional[str] = None,
ipv6: Optional[str] = None,
retry: Optional[str] = DEFAULT_RETRY,
):
self.service_name = service_name
- self.host_name = host_name
- self.port = port
- self.endpoint = endpoint
- self.protocol = protocol
- self.url = "{}://{}:{}{}".format(
- self.protocol, self.host_name, self.port, self.endpoint
- )
+ if url is None:
+ self.url = os.environ.get(
+ "OTEL_EXPORTER_ZIPKIN_ENDPOINT", DEFAULT_URL
+ )
+ else:
+ self.url = url
+
+ self.port = urlparse(self.url).port
+
self.ipv4 = ipv4
self.ipv6 = ipv6
self.retry = retry
@@ -216,7 +211,7 @@ def _extract_tags_from_dict(tags_dict):
def _extract_tags_from_span(span: Span):
tags = _extract_tags_from_dict(getattr(span, "attributes", None))
if span.resource:
- tags.update(_extract_tags_from_dict(span.resource.labels))
+ tags.update(_extract_tags_from_dict(span.resource.attributes))
return tags
diff --git a/exporter/opentelemetry-exporter-zipkin/tests/test_zipkin_exporter.py b/exporter/opentelemetry-exporter-zipkin/tests/test_zipkin_exporter.py
index f6e24a1495a..96586d91e06 100644
--- a/exporter/opentelemetry-exporter-zipkin/tests/test_zipkin_exporter.py
+++ b/exporter/opentelemetry-exporter-zipkin/tests/test_zipkin_exporter.py
@@ -13,6 +13,7 @@
# limitations under the License.
import json
+import os
import unittest
from unittest.mock import MagicMock, patch
@@ -43,54 +44,56 @@ def setUp(self):
self._test_span.start()
self._test_span.end()
+ def tearDown(self):
+ if "OTEL_EXPORTER_ZIPKIN_ENDPOINT" in os.environ:
+ del os.environ["OTEL_EXPORTER_ZIPKIN_ENDPOINT"]
+
+ def test_constructor_env_var(self):
+ """Test the default values assigned by constructor."""
+ url = "https://foo:9911/path"
+ os.environ["OTEL_EXPORTER_ZIPKIN_ENDPOINT"] = url
+ service_name = "my-service-name"
+ port = 9911
+ exporter = ZipkinSpanExporter(service_name)
+ ipv4 = None
+ ipv6 = None
+
+ self.assertEqual(exporter.service_name, service_name)
+ self.assertEqual(exporter.ipv4, ipv4)
+ self.assertEqual(exporter.ipv6, ipv6)
+ self.assertEqual(exporter.url, url)
+ self.assertEqual(exporter.port, port)
+
def test_constructor_default(self):
"""Test the default values assigned by constructor."""
service_name = "my-service-name"
- host_name = "localhost"
port = 9411
- endpoint = "/api/v2/spans"
exporter = ZipkinSpanExporter(service_name)
ipv4 = None
ipv6 = None
- protocol = "http"
url = "http://localhost:9411/api/v2/spans"
self.assertEqual(exporter.service_name, service_name)
- self.assertEqual(exporter.host_name, host_name)
self.assertEqual(exporter.port, port)
- self.assertEqual(exporter.endpoint, endpoint)
self.assertEqual(exporter.ipv4, ipv4)
self.assertEqual(exporter.ipv6, ipv6)
- self.assertEqual(exporter.protocol, protocol)
self.assertEqual(exporter.url, url)
def test_constructor_explicit(self):
"""Test the constructor passing all the options."""
service_name = "my-opentelemetry-zipkin"
- host_name = "opentelemetry.io"
port = 15875
- endpoint = "/myapi/traces?format=zipkin"
ipv4 = "1.2.3.4"
ipv6 = "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
- protocol = "https"
url = "https://opentelemetry.io:15875/myapi/traces?format=zipkin"
exporter = ZipkinSpanExporter(
- service_name=service_name,
- host_name=host_name,
- port=port,
- endpoint=endpoint,
- ipv4=ipv4,
- ipv6=ipv6,
- protocol=protocol,
+ service_name=service_name, url=url, ipv4=ipv4, ipv6=ipv6,
)
self.assertEqual(exporter.service_name, service_name)
- self.assertEqual(exporter.host_name, host_name)
self.assertEqual(exporter.port, port)
- self.assertEqual(exporter.endpoint, endpoint)
self.assertEqual(exporter.ipv4, ipv4)
self.assertEqual(exporter.ipv6, ipv6)
- self.assertEqual(exporter.protocol, protocol)
self.assertEqual(exporter.url, url)
# pylint: disable=too-many-locals
@@ -165,6 +168,7 @@ def test_export(self):
]
otel_spans[0].start(start_time=start_times[0])
+ otel_spans[0].resource = Resource({})
# added here to preserve order
otel_spans[0].set_attribute("key_bool", False)
otel_spans[0].set_attribute("key_string", "hello_world")
@@ -173,18 +177,19 @@ def test_export(self):
otel_spans[1].start(start_time=start_times[1])
otel_spans[1].resource = Resource(
- labels={"key_resource": "some_resource"}
+ attributes={"key_resource": "some_resource"}
)
otel_spans[1].end(end_time=end_times[1])
otel_spans[2].start(start_time=start_times[2])
otel_spans[2].set_attribute("key_string", "hello_world")
otel_spans[2].resource = Resource(
- labels={"key_resource": "some_resource"}
+ attributes={"key_resource": "some_resource"}
)
otel_spans[2].end(end_time=end_times[2])
otel_spans[3].start(start_time=start_times[3])
+ otel_spans[3].resource = Resource({})
otel_spans[3].end(end_time=end_times[3])
service_name = "test-service"
@@ -295,6 +300,7 @@ def test_zero_padding(self):
)
otel_span.start(start_time=start_time)
+ otel_span.resource = Resource({})
otel_span.end(end_time=end_time)
service_name = "test-service"
diff --git a/instrumentation/opentelemetry-instrumentation-aiohttp-client/setup.cfg b/instrumentation/opentelemetry-instrumentation-aiohttp-client/setup.cfg
index 557b8d9a087..a222f323c06 100644
--- a/instrumentation/opentelemetry-instrumentation-aiohttp-client/setup.cfg
+++ b/instrumentation/opentelemetry-instrumentation-aiohttp-client/setup.cfg
@@ -23,7 +23,7 @@ url = https://github.com/open-telemetry/opentelemetry-python/instrumentation/ope
platforms = any
license = Apache-2.0
classifiers =
- Development Status :: 3 - Alpha
+ Development Status :: 4 - Beta
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Programming Language :: Python
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/README.rst b/instrumentation/opentelemetry-instrumentation-asgi/README.rst
index f2b760976a7..3eb8e2dda72 100644
--- a/instrumentation/opentelemetry-instrumentation-asgi/README.rst
+++ b/instrumentation/opentelemetry-instrumentation-asgi/README.rst
@@ -1,5 +1,5 @@
-OpenTelemetry ASGI Middleware
-=============================
+OpenTelemetry ASGI Instrumentation
+==================================
|pypi|
@@ -54,6 +54,17 @@ Modify the application's ``asgi.py`` file as shown below.
application = OpenTelemetryMiddleware(application)
+Usage (Raw ASGI)
+----------------
+
+.. code-block:: python
+
+ from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware
+
+ app = ... # An ASGI application.
+ app = OpenTelemetryMiddleware(app)
+
+
References
----------
diff --git a/instrumentation/opentelemetry-instrumentation-boto/setup.cfg b/instrumentation/opentelemetry-instrumentation-boto/setup.cfg
index 0bf6820702b..994d5fe784e 100644
--- a/instrumentation/opentelemetry-instrumentation-boto/setup.cfg
+++ b/instrumentation/opentelemetry-instrumentation-boto/setup.cfg
@@ -56,4 +56,4 @@ where = src
[options.entry_points]
opentelemetry_instrumentor =
- django = opentelemetry.instrumentation.boto:BotoInstrumentor
+ boto = opentelemetry.instrumentation.boto:BotoInstrumentor
diff --git a/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py b/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py
index e6a0e351e5c..8e03cd6e749 100644
--- a/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py
@@ -126,13 +126,15 @@ def _common_request( # pylint: disable=too-many-locals
if args:
http_method = args[0]
span.resource = Resource(
- labels={
+ attributes={
"endpoint": endpoint_name,
"http_method": http_method.lower(),
}
)
else:
- span.resource = Resource(labels={"endpoint": endpoint_name})
+ span.resource = Resource(
+ attributes={"endpoint": endpoint_name}
+ )
add_span_arg_tags(
span, endpoint_name, args, args_name, traced_args,
diff --git a/instrumentation/opentelemetry-instrumentation-boto/tests/test_boto_instrumentation.py b/instrumentation/opentelemetry-instrumentation-boto/tests/test_boto_instrumentation.py
index 7ed87755018..0a4a4b8869a 100644
--- a/instrumentation/opentelemetry-instrumentation-boto/tests/test_boto_instrumentation.py
+++ b/instrumentation/opentelemetry-instrumentation-boto/tests/test_boto_instrumentation.py
@@ -73,7 +73,7 @@ def test_ec2_client(self):
self.assertEqual(
span.resource,
Resource(
- labels={"endpoint": "ec2", "http_method": "runinstances"}
+ attributes={"endpoint": "ec2", "http_method": "runinstances"}
),
)
self.assertEqual(span.attributes["http.method"], "POST")
@@ -131,7 +131,7 @@ def test_s3_client(self):
assert_span_http_status_code(span, 200)
self.assertEqual(
span.resource,
- Resource(labels={"endpoint": "s3", "http_method": "head"}),
+ Resource(attributes={"endpoint": "s3", "http_method": "head"}),
)
self.assertEqual(span.attributes["http.method"], "HEAD")
self.assertEqual(span.attributes["aws.operation"], "head_bucket")
@@ -146,7 +146,7 @@ def test_s3_client(self):
span = spans[2]
self.assertEqual(
span.resource,
- Resource(labels={"endpoint": "s3", "http_method": "head"}),
+ Resource(attributes={"endpoint": "s3", "http_method": "head"}),
)
@mock_s3_deprecated
@@ -166,13 +166,13 @@ def test_s3_put(self):
assert_span_http_status_code(spans[0], 200)
self.assertEqual(
spans[0].resource,
- Resource(labels={"endpoint": "s3", "http_method": "put"}),
+ Resource(attributes={"endpoint": "s3", "http_method": "put"}),
)
# get bucket
self.assertEqual(spans[1].attributes["aws.operation"], "head_bucket")
self.assertEqual(
spans[1].resource,
- Resource(labels={"endpoint": "s3", "http_method": "head"}),
+ Resource(attributes={"endpoint": "s3", "http_method": "head"}),
)
# put object
self.assertEqual(
@@ -180,7 +180,7 @@ def test_s3_put(self):
)
self.assertEqual(
spans[2].resource,
- Resource(labels={"endpoint": "s3", "http_method": "put"}),
+ Resource(attributes={"endpoint": "s3", "http_method": "put"}),
)
@mock_lambda_deprecated
@@ -223,7 +223,7 @@ def test_lambda_client(self):
assert_span_http_status_code(span, 200)
self.assertEqual(
span.resource,
- Resource(labels={"endpoint": "lambda", "http_method": "get"}),
+ Resource(attributes={"endpoint": "lambda", "http_method": "get"}),
)
self.assertEqual(span.attributes["http.method"], "GET")
self.assertEqual(span.attributes["aws.region"], "us-east-2")
@@ -241,7 +241,10 @@ def test_sts_client(self):
self.assertEqual(
span.resource,
Resource(
- labels={"endpoint": "sts", "http_method": "getfederationtoken"}
+ attributes={
+ "endpoint": "sts",
+ "http_method": "getfederationtoken",
+ }
),
)
self.assertEqual(span.attributes["aws.region"], "us-west-2")
@@ -268,6 +271,6 @@ def test_elasticache_client(self):
assert spans
span = spans[0]
self.assertEqual(
- span.resource, Resource(labels={"endpoint": "elasticcache"})
+ span.resource, Resource(attributes={"endpoint": "elasticcache"})
)
self.assertEqual(span.attributes["aws.region"], "us-west-2")
diff --git a/instrumentation/opentelemetry-instrumentation-botocore/setup.cfg b/instrumentation/opentelemetry-instrumentation-botocore/setup.cfg
index bad58662ea3..c1198d356c8 100644
--- a/instrumentation/opentelemetry-instrumentation-botocore/setup.cfg
+++ b/instrumentation/opentelemetry-instrumentation-botocore/setup.cfg
@@ -54,4 +54,4 @@ where = src
[options.entry_points]
opentelemetry_instrumentor =
- django = opentelemetry.instrumentation.botocore:BotoCoreInstrumentor
+ botocore = opentelemetry.instrumentation.botocore:BotocoreInstrumentor
diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/__init__.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/__init__.py
index 9b9b1e9a806..d716c90d684 100644
--- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/__init__.py
@@ -100,14 +100,16 @@ def _patched_api_call(self, original_func, instance, args, kwargs):
if args:
operation = args[0]
span.resource = Resource(
- labels={
+ attributes={
"endpoint": endpoint_name,
"operation": operation.lower(),
}
)
else:
- span.resource = Resource(labels={"endpoint": endpoint_name})
+ span.resource = Resource(
+ attributes={"endpoint": endpoint_name}
+ )
add_span_arg_tags(
span,
diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_instrumentation.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_instrumentation.py
index 47073478fce..64c9d024027 100644
--- a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_instrumentation.py
+++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_instrumentation.py
@@ -53,7 +53,10 @@ def test_traced_client(self):
self.assertEqual(
span.resource,
Resource(
- labels={"endpoint": "ec2", "operation": "describeinstances"}
+ attributes={
+ "endpoint": "ec2",
+ "operation": "describeinstances",
+ }
),
)
self.assertEqual(span.name, "ec2.command")
@@ -81,7 +84,9 @@ def test_s3_client(self):
assert_span_http_status_code(span, 200)
self.assertEqual(
span.resource,
- Resource(labels={"endpoint": "s3", "operation": "listbuckets"}),
+ Resource(
+ attributes={"endpoint": "s3", "operation": "listbuckets"}
+ ),
)
# testing for span error
@@ -93,36 +98,41 @@ def test_s3_client(self):
span = spans[2]
self.assertEqual(
span.resource,
- Resource(labels={"endpoint": "s3", "operation": "listobjects"}),
+ Resource(
+ attributes={"endpoint": "s3", "operation": "listobjects"}
+ ),
)
- @mock_s3
- def test_s3_put(self):
- params = dict(Key="foo", Bucket="mybucket", Body=b"bar")
- s3 = self.session.create_client("s3", region_name="us-west-2")
- s3.create_bucket(Bucket="mybucket")
- s3.put_object(**params)
-
- spans = self.memory_exporter.get_finished_spans()
- assert spans
- span = spans[0]
- self.assertEqual(len(spans), 2)
- self.assertEqual(span.attributes["aws.operation"], "CreateBucket")
- assert_span_http_status_code(span, 200)
- self.assertEqual(
- span.resource,
- Resource(labels={"endpoint": "s3", "operation": "createbucket"}),
- )
- self.assertEqual(spans[1].attributes["aws.operation"], "PutObject")
- self.assertEqual(
- spans[1].resource,
- Resource(labels={"endpoint": "s3", "operation": "putobject"}),
- )
- self.assertEqual(spans[1].attributes["params.Key"], str(params["Key"]))
- self.assertEqual(
- spans[1].attributes["params.Bucket"], str(params["Bucket"])
- )
- self.assertTrue("params.Body" not in spans[1].attributes.keys())
+ # Comment test for issue 1088
+ # @mock_s3
+ # def test_s3_put(self):
+ # params = dict(Key="foo", Bucket="mybucket", Body=b"bar")
+ # s3 = self.session.create_client("s3", region_name="us-west-2")
+ # s3.create_bucket(Bucket="mybucket")
+ # s3.put_object(**params)
+
+ # spans = self.memory_exporter.get_finished_spans()
+ # assert spans
+ # span = spans[0]
+ # self.assertEqual(len(spans), 2)
+ # self.assertEqual(span.attributes["aws.operation"], "CreateBucket")
+ # assert_span_http_status_code(span, 200)
+ # self.assertEqual(
+ # span.resource,
+ # Resource(
+ # attributes={"endpoint": "s3", "operation": "createbucket"}
+ # ),
+ # )
+ # self.assertEqual(spans[1].attributes["aws.operation"], "PutObject")
+ # self.assertEqual(
+ # spans[1].resource,
+ # Resource(attributes={"endpoint": "s3", "operation": "putobject"}),
+ # )
+ # self.assertEqual(spans[1].attributes["params.Key"], str(params["Key"]))
+ # self.assertEqual(
+ # spans[1].attributes["params.Bucket"], str(params["Bucket"])
+ # )
+ # self.assertTrue("params.Body" not in spans[1].attributes.keys())
@mock_sqs
def test_sqs_client(self):
@@ -139,7 +149,9 @@ def test_sqs_client(self):
assert_span_http_status_code(span, 200)
self.assertEqual(
span.resource,
- Resource(labels={"endpoint": "sqs", "operation": "listqueues"}),
+ Resource(
+ attributes={"endpoint": "sqs", "operation": "listqueues"}
+ ),
)
@mock_kinesis
@@ -160,7 +172,7 @@ def test_kinesis_client(self):
self.assertEqual(
span.resource,
Resource(
- labels={"endpoint": "kinesis", "operation": "liststreams"}
+ attributes={"endpoint": "kinesis", "operation": "liststreams"}
),
)
@@ -205,7 +217,7 @@ def test_lambda_client(self):
self.assertEqual(
span.resource,
Resource(
- labels={"endpoint": "lambda", "operation": "listfunctions"}
+ attributes={"endpoint": "lambda", "operation": "listfunctions"}
),
)
@@ -224,7 +236,7 @@ def test_kms_client(self):
assert_span_http_status_code(span, 200)
self.assertEqual(
span.resource,
- Resource(labels={"endpoint": "kms", "operation": "listkeys"}),
+ Resource(attributes={"endpoint": "kms", "operation": "listkeys"}),
)
# checking for protection on sts against security leak
diff --git a/instrumentation/opentelemetry-instrumentation-dbapi/CHANGELOG.md b/instrumentation/opentelemetry-instrumentation-dbapi/CHANGELOG.md
index bdb9236acbb..99e1e09b553 100644
--- a/instrumentation/opentelemetry-instrumentation-dbapi/CHANGELOG.md
+++ b/instrumentation/opentelemetry-instrumentation-dbapi/CHANGELOG.md
@@ -2,6 +2,9 @@
## Unreleased
+- bugfix: cursors and connections now produce spans when used with context managers
+ ([#1028](https://github.com/open-telemetry/opentelemetry-python/pull/1028))
+
## Version 0.12b0
Released 2020-08-14
@@ -19,4 +22,4 @@ Released 2020-05-12
Released 2020-02-21
-- Initial release
\ No newline at end of file
+- Initial release
diff --git a/instrumentation/opentelemetry-instrumentation-dbapi/src/opentelemetry/instrumentation/dbapi/__init__.py b/instrumentation/opentelemetry-instrumentation-dbapi/src/opentelemetry/instrumentation/dbapi/__init__.py
index 035c823bcfe..551f71555ad 100644
--- a/instrumentation/opentelemetry-instrumentation-dbapi/src/opentelemetry/instrumentation/dbapi/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-dbapi/src/opentelemetry/instrumentation/dbapi/__init__.py
@@ -294,6 +294,13 @@ def cursor(self, *args, **kwargs):
self.__wrapped__.cursor(*args, **kwargs), db_api_integration
)
+ def __enter__(self):
+ self.__wrapped__.__enter__()
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.__wrapped__.__exit__(*args, **kwargs)
+
return TracedConnectionProxy(connection, *args, **kwargs)
@@ -366,4 +373,11 @@ def callproc(self, *args, **kwargs):
self.__wrapped__.callproc, *args, **kwargs
)
+ def __enter__(self):
+ self.__wrapped__.__enter__()
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.__wrapped__.__exit__(*args, **kwargs)
+
return TracedCursorProxy(cursor, *args, **kwargs)
diff --git a/instrumentation/opentelemetry-instrumentation-grpc/setup.cfg b/instrumentation/opentelemetry-instrumentation-grpc/setup.cfg
index 590cb5e9cf2..6a7db72aa7d 100644
--- a/instrumentation/opentelemetry-instrumentation-grpc/setup.cfg
+++ b/instrumentation/opentelemetry-instrumentation-grpc/setup.cfg
@@ -42,7 +42,7 @@ packages=find_namespace:
install_requires =
opentelemetry-api == 0.13dev0
opentelemetry-sdk == 0.13dev0
- grpcio == 1.30
+ grpcio ~= 1.27
[options.extras_require]
test =
diff --git a/instrumentation/opentelemetry-instrumentation-grpc/tests/test_client_interceptor.py b/instrumentation/opentelemetry-instrumentation-grpc/tests/test_client_interceptor.py
index 458f32e0472..3ed40c141c8 100644
--- a/instrumentation/opentelemetry-instrumentation-grpc/tests/test_client_interceptor.py
+++ b/instrumentation/opentelemetry-instrumentation-grpc/tests/test_client_interceptor.py
@@ -49,6 +49,7 @@ def tearDown(self):
GrpcInstrumentorClient().uninstrument()
self.memory_metrics_exporter.clear()
self.server.stop(None)
+ self.channel.close()
def _verify_success_records(self, num_bytes_out, num_bytes_in, method):
# pylint: disable=protected-access,no-member
diff --git a/instrumentation/opentelemetry-instrumentation-opentracing-shim/src/opentelemetry/instrumentation/opentracing_shim/__init__.py b/instrumentation/opentelemetry-instrumentation-opentracing-shim/src/opentelemetry/instrumentation/opentracing_shim/__init__.py
index 90d7f0a30cb..6bb22130d8e 100644
--- a/instrumentation/opentelemetry-instrumentation-opentracing-shim/src/opentelemetry/instrumentation/opentracing_shim/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-opentracing-shim/src/opentelemetry/instrumentation/opentracing_shim/__init__.py
@@ -100,8 +100,8 @@
)
from opentelemetry import propagators
+from opentelemetry.baggage import get_baggage, set_baggage
from opentelemetry.context import Context, attach, detach, get_value, set_value
-from opentelemetry.correlationcontext import get_correlation, set_correlation
from opentelemetry.instrumentation.opentracing_shim import util
from opentelemetry.instrumentation.opentracing_shim.version import __version__
from opentelemetry.trace import INVALID_SPAN_CONTEXT, DefaultSpan, Link
@@ -290,7 +290,7 @@ def set_baggage_item(self, key: str, value: str):
value: A tag value.
"""
# pylint: disable=protected-access
- self._context._baggage = set_correlation(
+ self._context._baggage = set_baggage(
key, value, context=self._context._baggage
)
@@ -303,7 +303,7 @@ def get_baggage_item(self, key: str) -> Optional[object]:
Returns this :class:`SpanShim` instance to allow call chaining.
"""
# pylint: disable=protected-access
- return get_correlation(key, context=self._context._baggage)
+ return get_baggage(key, context=self._context._baggage)
class ScopeShim(Scope):
@@ -676,7 +676,7 @@ def inject(self, span_context, format: object, carrier: object):
if format not in self._supported_formats:
raise UnsupportedFormatException
- propagator = propagators.get_global_httptextformat()
+ propagator = propagators.get_global_textmap()
ctx = set_span_in_context(DefaultSpan(span_context.unwrap()))
propagator.inject(type(carrier).__setitem__, carrier, context=ctx)
@@ -710,7 +710,7 @@ def get_as_list(dict_object, key):
value = dict_object.get(key)
return [value] if value is not None else []
- propagator = propagators.get_global_httptextformat()
+ propagator = propagators.get_global_textmap()
ctx = propagator.extract(get_as_list, carrier)
span = get_current_span(ctx)
if span is not None:
diff --git a/instrumentation/opentelemetry-instrumentation-opentracing-shim/tests/test_shim.py b/instrumentation/opentelemetry-instrumentation-opentracing-shim/tests/test_shim.py
index c880913a877..672e7b02f96 100644
--- a/instrumentation/opentelemetry-instrumentation-opentracing-shim/tests/test_shim.py
+++ b/instrumentation/opentelemetry-instrumentation-opentracing-shim/tests/test_shim.py
@@ -29,9 +29,9 @@
util,
)
from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.test.mock_httptextformat import (
- MockHTTPTextFormat,
- NOOPHTTPTextFormat,
+from opentelemetry.test.mock_textmap import (
+ MockTextMapPropagator,
+ NOOPTextMapPropagator,
)
@@ -46,15 +46,15 @@ def setUp(self):
@classmethod
def setUpClass(cls):
# Save current propagator to be restored on teardown.
- cls._previous_propagator = propagators.get_global_httptextformat()
+ cls._previous_propagator = propagators.get_global_textmap()
# Set mock propagator for testing.
- propagators.set_global_httptextformat(MockHTTPTextFormat())
+ propagators.set_global_textmap(MockTextMapPropagator())
@classmethod
def tearDownClass(cls):
# Restore previous propagator.
- propagators.set_global_httptextformat(cls._previous_propagator)
+ propagators.set_global_textmap(cls._previous_propagator)
def test_shim_type(self):
# Verify shim is an OpenTracing tracer.
@@ -482,8 +482,10 @@ def test_inject_http_headers(self):
headers = {}
self.shim.inject(context, opentracing.Format.HTTP_HEADERS, headers)
- self.assertEqual(headers[MockHTTPTextFormat.TRACE_ID_KEY], str(1220))
- self.assertEqual(headers[MockHTTPTextFormat.SPAN_ID_KEY], str(7478))
+ self.assertEqual(
+ headers[MockTextMapPropagator.TRACE_ID_KEY], str(1220)
+ )
+ self.assertEqual(headers[MockTextMapPropagator.SPAN_ID_KEY], str(7478))
def test_inject_text_map(self):
"""Test `inject()` method for Format.TEXT_MAP."""
@@ -496,8 +498,12 @@ def test_inject_text_map(self):
# Verify Format.TEXT_MAP
text_map = {}
self.shim.inject(context, opentracing.Format.TEXT_MAP, text_map)
- self.assertEqual(text_map[MockHTTPTextFormat.TRACE_ID_KEY], str(1220))
- self.assertEqual(text_map[MockHTTPTextFormat.SPAN_ID_KEY], str(7478))
+ self.assertEqual(
+ text_map[MockTextMapPropagator.TRACE_ID_KEY], str(1220)
+ )
+ self.assertEqual(
+ text_map[MockTextMapPropagator.SPAN_ID_KEY], str(7478)
+ )
def test_inject_binary(self):
"""Test `inject()` method for Format.BINARY."""
@@ -515,8 +521,8 @@ def test_extract_http_headers(self):
"""Test `extract()` method for Format.HTTP_HEADERS."""
carrier = {
- MockHTTPTextFormat.TRACE_ID_KEY: 1220,
- MockHTTPTextFormat.SPAN_ID_KEY: 7478,
+ MockTextMapPropagator.TRACE_ID_KEY: 1220,
+ MockTextMapPropagator.SPAN_ID_KEY: 7478,
}
ctx = self.shim.extract(opentracing.Format.HTTP_HEADERS, carrier)
@@ -527,22 +533,22 @@ def test_extract_empty_context_returns_invalid_context(self):
"""In the case where the propagator cannot extract a
SpanContext, extract should return and invalid span context.
"""
- _old_propagator = propagators.get_global_httptextformat()
- propagators.set_global_httptextformat(NOOPHTTPTextFormat())
+ _old_propagator = propagators.get_global_textmap()
+ propagators.set_global_textmap(NOOPTextMapPropagator())
try:
carrier = {}
ctx = self.shim.extract(opentracing.Format.HTTP_HEADERS, carrier)
self.assertEqual(ctx.unwrap(), trace.INVALID_SPAN_CONTEXT)
finally:
- propagators.set_global_httptextformat(_old_propagator)
+ propagators.set_global_textmap(_old_propagator)
def test_extract_text_map(self):
"""Test `extract()` method for Format.TEXT_MAP."""
carrier = {
- MockHTTPTextFormat.TRACE_ID_KEY: 1220,
- MockHTTPTextFormat.SPAN_ID_KEY: 7478,
+ MockTextMapPropagator.TRACE_ID_KEY: 1220,
+ MockTextMapPropagator.SPAN_ID_KEY: 7478,
}
ctx = self.shim.extract(opentracing.Format.TEXT_MAP, carrier)
diff --git a/instrumentation/opentelemetry-instrumentation-requests/CHANGELOG.md b/instrumentation/opentelemetry-instrumentation-requests/CHANGELOG.md
index 3f18f6101bc..eeb2e837252 100644
--- a/instrumentation/opentelemetry-instrumentation-requests/CHANGELOG.md
+++ b/instrumentation/opentelemetry-instrumentation-requests/CHANGELOG.md
@@ -2,6 +2,9 @@
## Unreleased
+- Add support for instrumenting prepared requests
+ ([#1040](https://github.com/open-telemetry/opentelemetry-python/pull/1040))
+
## Version 0.12b0
Released 2020-08-14
diff --git a/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py b/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py
index e2c54b7f1b0..16e8952fea4 100644
--- a/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py
@@ -29,26 +29,17 @@
opentelemetry.instrumentation.requests.RequestsInstrumentor().instrument()
response = requests.get(url="https://www.example.org/")
-Limitations
------------
-
-Note that calls that do not use the higher-level APIs but use
-:code:`requests.sessions.Session.send` (or an alias thereof) directly, are
-currently not traced. If you find any other way to trigger an untraced HTTP
-request, please report it via a GitHub issue with :code:`[requests: untraced
-API]` in the title.
-
API
---
"""
import functools
import types
-from urllib.parse import urlparse
from requests import Timeout, URLRequired
from requests.exceptions import InvalidSchema, InvalidURL, MissingSchema
from requests.sessions import Session
+from requests.structures import CaseInsensitiveDict
from opentelemetry import context, propagators
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
@@ -57,6 +48,10 @@
from opentelemetry.trace import SpanKind, get_tracer
from opentelemetry.trace.status import Status, StatusCanonicalCode
+# A key to a context variable to avoid creating duplicate spans when instrumenting
+# both, Session.request and Session.send, since Session.request calls into Session.send
+_SUPPRESS_REQUESTS_INSTRUMENTATION_KEY = "suppress_requests_instrumentation"
+
# pylint: disable=unused-argument
def _instrument(tracer_provider=None, span_callback=None):
@@ -71,15 +66,54 @@ def _instrument(tracer_provider=None, span_callback=None):
# before v1.0.0, Dec 17, 2012, see
# https://github.com/psf/requests/commit/4e5c4a6ab7bb0195dececdd19bb8505b872fe120)
- wrapped = Session.request
+ wrapped_request = Session.request
+ wrapped_send = Session.send
- @functools.wraps(wrapped)
+ @functools.wraps(wrapped_request)
def instrumented_request(self, method, url, *args, **kwargs):
- if context.get_value("suppress_instrumentation"):
- return wrapped(self, method, url, *args, **kwargs)
+ def get_or_create_headers():
+ headers = kwargs.get("headers")
+ if headers is None:
+ headers = {}
+ kwargs["headers"] = headers
+
+ return headers
+
+ def call_wrapped():
+ return wrapped_request(self, method, url, *args, **kwargs)
+
+ return _instrumented_requests_call(
+ method, url, call_wrapped, get_or_create_headers
+ )
+
+ @functools.wraps(wrapped_send)
+ def instrumented_send(self, request, **kwargs):
+ def get_or_create_headers():
+ request.headers = (
+ request.headers
+ if request.headers is not None
+ else CaseInsensitiveDict()
+ )
+ return request.headers
+
+ def call_wrapped():
+ return wrapped_send(self, request, **kwargs)
+
+ return _instrumented_requests_call(
+ request.method, request.url, call_wrapped, get_or_create_headers
+ )
+
+ def _instrumented_requests_call(
+ method: str, url: str, call_wrapped, get_or_create_headers
+ ):
+ if context.get_value("suppress_instrumentation") or context.get_value(
+ _SUPPRESS_REQUESTS_INSTRUMENTATION_KEY
+ ):
+ return call_wrapped()
# See
# https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/http.md#http-client
+ method = method.upper()
span_name = "HTTP {}".format(method)
exception = None
@@ -91,17 +125,19 @@ def instrumented_request(self, method, url, *args, **kwargs):
span.set_attribute("http.method", method.upper())
span.set_attribute("http.url", url)
- headers = kwargs.get("headers", {}) or {}
+ headers = get_or_create_headers()
propagators.inject(type(headers).__setitem__, headers)
- kwargs["headers"] = headers
+ token = context.attach(
+ context.set_value(_SUPPRESS_REQUESTS_INSTRUMENTATION_KEY, True)
+ )
try:
- result = wrapped(
- self, method, url, *args, **kwargs
- ) # *** PROCEED
+ result = call_wrapped() # *** PROCEED
except Exception as exc: # pylint: disable=W0703
exception = exc
result = getattr(exc, "response", None)
+ finally:
+ context.detach(token)
if exception is not None:
span.set_status(
@@ -124,24 +160,34 @@ def instrumented_request(self, method, url, *args, **kwargs):
return result
- instrumented_request.opentelemetry_ext_requests_applied = True
-
+ instrumented_request.opentelemetry_instrumentation_requests_applied = True
Session.request = instrumented_request
- # TODO: We should also instrument requests.sessions.Session.send
- # but to avoid doubled spans, we would need some context-local
- # state (i.e., only create a Span if the current context's URL is
- # different, then push the current URL, pop it afterwards)
+ instrumented_send.opentelemetry_instrumentation_requests_applied = True
+ Session.send = instrumented_send
def _uninstrument():
- # pylint: disable=global-statement
"""Disables instrumentation of :code:`requests` through this module.
Note that this only works if no other module also patches requests."""
- if getattr(Session.request, "opentelemetry_ext_requests_applied", False):
- original = Session.request.__wrapped__ # pylint:disable=no-member
- Session.request = original
+ _uninstrument_from(Session)
+
+
+def _uninstrument_from(instr_root, restore_as_bound_func=False):
+ for instr_func_name in ("request", "send"):
+ instr_func = getattr(instr_root, instr_func_name)
+ if not getattr(
+ instr_func,
+ "opentelemetry_instrumentation_requests_applied",
+ False,
+ ):
+ continue
+
+ original = instr_func.__wrapped__ # pylint:disable=no-member
+ if restore_as_bound_func:
+ original = types.MethodType(original, instr_root)
+ setattr(instr_root, instr_func_name, original)
def _exception_to_canonical_code(exc: Exception) -> StatusCanonicalCode:
@@ -179,8 +225,4 @@ def _uninstrument(self, **kwargs):
@staticmethod
def uninstrument_session(session):
"""Disables instrumentation on the session object."""
- if getattr(
- session.request, "opentelemetry_ext_requests_applied", False
- ):
- original = session.request.__wrapped__ # pylint:disable=no-member
- session.request = types.MethodType(original, session)
+ _uninstrument_from(session, restore_as_bound_func=True)
diff --git a/instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py b/instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py
index da09118e5bc..41f5bc39d96 100644
--- a/instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py
+++ b/instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import abc
from unittest import mock
import httpretty
@@ -21,37 +22,52 @@
from opentelemetry import context, propagators, trace
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from opentelemetry.sdk import resources
-from opentelemetry.test.mock_httptextformat import MockHTTPTextFormat
+from opentelemetry.test.mock_textmap import MockTextMapPropagator
from opentelemetry.test.test_base import TestBase
from opentelemetry.trace.status import StatusCanonicalCode
-class TestRequestsIntegration(TestBase):
+class RequestsIntegrationTestBase(abc.ABC):
+ # pylint: disable=no-member
+
URL = "http://httpbin.org/status/200"
+ # pylint: disable=invalid-name
def setUp(self):
super().setUp()
RequestsInstrumentor().instrument()
httpretty.enable()
- httpretty.register_uri(
- httpretty.GET, self.URL, body="Hello!",
- )
+ httpretty.register_uri(httpretty.GET, self.URL, body="Hello!")
+ # pylint: disable=invalid-name
def tearDown(self):
super().tearDown()
RequestsInstrumentor().uninstrument()
httpretty.disable()
+ def assert_span(self, exporter=None, num_spans=1):
+ if exporter is None:
+ exporter = self.memory_exporter
+ span_list = exporter.get_finished_spans()
+ self.assertEqual(num_spans, len(span_list))
+ if num_spans == 0:
+ return None
+ if num_spans == 1:
+ return span_list[0]
+ return span_list
+
+ @staticmethod
+ @abc.abstractmethod
+ def perform_request(url: str, session: requests.Session = None):
+ pass
+
def test_basic(self):
- result = requests.get(self.URL)
+ result = self.perform_request(self.URL)
self.assertEqual(result.text, "Hello!")
-
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 1)
- span = span_list[0]
+ span = self.assert_span()
self.assertIs(span.kind, trace.SpanKind.CLIENT)
- self.assertEqual(span.name, "HTTP get")
+ self.assertEqual(span.name, "HTTP GET")
self.assertEqual(
span.attributes,
@@ -77,12 +93,10 @@ def test_not_foundbasic(self):
httpretty.register_uri(
httpretty.GET, url_404, status=404,
)
- result = requests.get(url_404)
+ result = self.perform_request(url_404)
self.assertEqual(result.status_code, 404)
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 1)
- span = span_list[0]
+ span = self.assert_span()
self.assertEqual(span.attributes.get("http.status_code"), 404)
self.assertEqual(span.attributes.get("http.status_text"), "Not Found")
@@ -92,31 +106,11 @@ def test_not_foundbasic(self):
trace.status.StatusCanonicalCode.NOT_FOUND,
)
- def test_invalid_url(self):
- url = "http://[::1/nope"
-
- with self.assertRaises(ValueError):
- requests.post(url)
-
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 1)
- span = span_list[0]
-
- self.assertEqual(span.name, "HTTP post")
- self.assertEqual(
- span.attributes,
- {"component": "http", "http.method": "POST", "http.url": url},
- )
- self.assertEqual(
- span.status.canonical_code, StatusCanonicalCode.INVALID_ARGUMENT
- )
-
def test_uninstrument(self):
RequestsInstrumentor().uninstrument()
- result = requests.get(self.URL)
+ result = self.perform_request(self.URL)
self.assertEqual(result.text, "Hello!")
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 0)
+ self.assert_span(num_spans=0)
# instrument again to avoid annoying warning message
RequestsInstrumentor().instrument()
@@ -124,64 +118,58 @@ def test_uninstrument_session(self):
session1 = requests.Session()
RequestsInstrumentor().uninstrument_session(session1)
- result = session1.get(self.URL)
+ result = self.perform_request(self.URL, session1)
self.assertEqual(result.text, "Hello!")
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 0)
+ self.assert_span(num_spans=0)
# Test that other sessions as well as global requests is still
# instrumented
session2 = requests.Session()
- result = session2.get(self.URL)
+ result = self.perform_request(self.URL, session2)
self.assertEqual(result.text, "Hello!")
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 1)
+ self.assert_span()
self.memory_exporter.clear()
- result = requests.get(self.URL)
+ result = self.perform_request(self.URL)
self.assertEqual(result.text, "Hello!")
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 1)
+ self.assert_span()
def test_suppress_instrumentation(self):
token = context.attach(
context.set_value("suppress_instrumentation", True)
)
try:
- result = requests.get(self.URL)
+ result = self.perform_request(self.URL)
self.assertEqual(result.text, "Hello!")
finally:
context.detach(token)
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 0)
+ self.assert_span(num_spans=0)
def test_distributed_context(self):
- previous_propagator = propagators.get_global_httptextformat()
+ previous_propagator = propagators.get_global_textmap()
try:
- propagators.set_global_httptextformat(MockHTTPTextFormat())
- result = requests.get(self.URL)
+ propagators.set_global_textmap(MockTextMapPropagator())
+ result = self.perform_request(self.URL)
self.assertEqual(result.text, "Hello!")
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 1)
- span = span_list[0]
+ span = self.assert_span()
headers = dict(httpretty.last_request().headers)
- self.assertIn(MockHTTPTextFormat.TRACE_ID_KEY, headers)
+ self.assertIn(MockTextMapPropagator.TRACE_ID_KEY, headers)
self.assertEqual(
str(span.get_context().trace_id),
- headers[MockHTTPTextFormat.TRACE_ID_KEY],
+ headers[MockTextMapPropagator.TRACE_ID_KEY],
)
- self.assertIn(MockHTTPTextFormat.SPAN_ID_KEY, headers)
+ self.assertIn(MockTextMapPropagator.SPAN_ID_KEY, headers)
self.assertEqual(
str(span.get_context().span_id),
- headers[MockHTTPTextFormat.SPAN_ID_KEY],
+ headers[MockTextMapPropagator.SPAN_ID_KEY],
)
finally:
- propagators.set_global_httptextformat(previous_propagator)
+ propagators.set_global_textmap(previous_propagator)
def test_span_callback(self):
RequestsInstrumentor().uninstrument()
@@ -195,13 +183,10 @@ def span_callback(span, result: requests.Response):
tracer_provider=self.tracer_provider, span_callback=span_callback,
)
- result = requests.get(self.URL)
+ result = self.perform_request(self.URL)
self.assertEqual(result.text, "Hello!")
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 1)
- span = span_list[0]
-
+ span = self.assert_span()
self.assertEqual(
span.attributes,
{
@@ -221,28 +206,21 @@ def test_custom_tracer_provider(self):
RequestsInstrumentor().uninstrument()
RequestsInstrumentor().instrument(tracer_provider=tracer_provider)
- result = requests.get(self.URL)
+ result = self.perform_request(self.URL)
self.assertEqual(result.text, "Hello!")
- span_list = exporter.get_finished_spans()
- self.assertEqual(len(span_list), 1)
- span = span_list[0]
-
+ span = self.assert_span(exporter=exporter)
self.assertIs(span.resource, resource)
- def test_if_headers_equals_none(self):
- result = requests.get(self.URL, headers=None)
- self.assertEqual(result.text, "Hello!")
-
- @mock.patch("requests.Session.send", side_effect=requests.RequestException)
+ @mock.patch(
+ "requests.adapters.HTTPAdapter.send",
+ side_effect=requests.RequestException,
+ )
def test_requests_exception_without_response(self, *_, **__):
-
with self.assertRaises(requests.RequestException):
- requests.get(self.URL)
+ self.perform_request(self.URL)
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 1)
- span = span_list[0]
+ span = self.assert_span()
self.assertEqual(
span.attributes,
{"component": "http", "http.method": "GET", "http.url": self.URL},
@@ -256,17 +234,14 @@ def test_requests_exception_without_response(self, *_, **__):
mocked_response.reason = "Internal Server Error"
@mock.patch(
- "requests.Session.send",
+ "requests.adapters.HTTPAdapter.send",
side_effect=requests.RequestException(response=mocked_response),
)
def test_requests_exception_with_response(self, *_, **__):
-
with self.assertRaises(requests.RequestException):
- requests.get(self.URL)
+ self.perform_request(self.URL)
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 1)
- span = span_list[0]
+ span = self.assert_span()
self.assertEqual(
span.attributes,
{
@@ -281,27 +256,66 @@ def test_requests_exception_with_response(self, *_, **__):
span.status.canonical_code, StatusCanonicalCode.INTERNAL
)
- @mock.patch("requests.Session.send", side_effect=Exception)
+ @mock.patch("requests.adapters.HTTPAdapter.send", side_effect=Exception)
def test_requests_basic_exception(self, *_, **__):
-
with self.assertRaises(Exception):
- requests.get(self.URL)
+ self.perform_request(self.URL)
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 1)
+ span = self.assert_span()
self.assertEqual(
- span_list[0].status.canonical_code, StatusCanonicalCode.UNKNOWN
+ span.status.canonical_code, StatusCanonicalCode.UNKNOWN
)
- @mock.patch("requests.Session.send", side_effect=requests.Timeout)
+ @mock.patch(
+ "requests.adapters.HTTPAdapter.send", side_effect=requests.Timeout
+ )
def test_requests_timeout_exception(self, *_, **__):
-
with self.assertRaises(Exception):
- requests.get(self.URL)
+ self.perform_request(self.URL)
- span_list = self.memory_exporter.get_finished_spans()
- self.assertEqual(len(span_list), 1)
+ span = self.assert_span()
self.assertEqual(
- span_list[0].status.canonical_code,
- StatusCanonicalCode.DEADLINE_EXCEEDED,
+ span.status.canonical_code, StatusCanonicalCode.DEADLINE_EXCEEDED
)
+
+
+class TestRequestsIntegration(RequestsIntegrationTestBase, TestBase):
+ @staticmethod
+ def perform_request(url: str, session: requests.Session = None):
+ if session is None:
+ return requests.get(url)
+ return session.get(url)
+
+ def test_invalid_url(self):
+ url = "http://[::1/nope"
+
+ with self.assertRaises(ValueError):
+ requests.post(url)
+
+ span = self.assert_span()
+
+ self.assertEqual(span.name, "HTTP POST")
+ self.assertEqual(
+ span.attributes,
+ {"component": "http", "http.method": "POST", "http.url": url},
+ )
+ self.assertEqual(
+ span.status.canonical_code, StatusCanonicalCode.INVALID_ARGUMENT
+ )
+
+ def test_if_headers_equals_none(self):
+ result = requests.get(self.URL, headers=None)
+ self.assertEqual(result.text, "Hello!")
+ self.assert_span()
+
+
+class TestRequestsIntegrationPreparedRequest(
+ RequestsIntegrationTestBase, TestBase
+):
+ @staticmethod
+ def perform_request(url: str, session: requests.Session = None):
+ if session is None:
+ session = requests.Session()
+ request = requests.Request("GET", url)
+ prepared_request = session.prepare_request(request)
+ return session.send(prepared_request)
diff --git a/instrumentation/opentelemetry-instrumentation-system-metrics/setup.cfg b/instrumentation/opentelemetry-instrumentation-system-metrics/setup.cfg
index 4a93873ce19..70c784c6068 100644
--- a/instrumentation/opentelemetry-instrumentation-system-metrics/setup.cfg
+++ b/instrumentation/opentelemetry-instrumentation-system-metrics/setup.cfg
@@ -41,6 +41,7 @@ package_dir=
packages=find_namespace:
install_requires =
opentelemetry-api == 0.13dev0
+ opentelemetry-sdk == 0.13dev0
psutil ~= 5.7.0
[options.extras_require]
diff --git a/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py b/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py
index fcd96f8210a..9ca36d00b29 100644
--- a/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py
@@ -16,12 +16,27 @@
process (CPU, memory, garbage collection) metrics. By default, the
following metrics are configured:
-"system_memory": ["total", "available", "used", "free"],
-"system_cpu": ["user", "system", "idle"],
-"network_bytes": ["bytes_recv", "bytes_sent"],
-"runtime_memory": ["rss", "vms"],
-"runtime_cpu": ["user", "system"],
+.. code:: python
+ {
+ "system.cpu.time": ["idle", "user", "system", "irq"],
+ "system.cpu.utilization": ["idle", "user", "system", "irq"],
+ "system.memory.usage": ["used", "free", "cached"],
+ "system.memory.utilization": ["used", "free", "cached"],
+ "system.swap.usage": ["used", "free"],
+ "system.swap.utilization": ["used", "free"],
+ "system.disk.io": ["read", "write"],
+ "system.disk.operations": ["read", "write"],
+ "system.disk.time": ["read", "write"],
+ "system.disk.merged": ["read", "write"],
+ "system.network.dropped.packets": ["transmit", "receive"],
+ "system.network.packets": ["transmit", "receive"],
+ "system.network.errors": ["transmit", "receive"],
+ "system.network.io": ["trasmit", "receive"],
+ "system.network.connections": ["family", "type"],
+ "runtime.memory": ["rss", "vms"],
+ "runtime.cpu.time": ["user", "system"],
+ }
Usage
-----
@@ -42,11 +57,11 @@
# to configure custom metrics
configuration = {
- "system_memory": ["total", "available", "used", "free", "active", "inactive", "wired"],
- "system_cpu": ["user", "nice", "system", "idle"],
- "network_bytes": ["bytes_recv", "bytes_sent"],
- "runtime_memory": ["rss", "vms"],
- "runtime_cpu": ["user", "system"],
+ "system.memory.usage": ["used", "free", "cached"],
+ "system.cpu.time": ["idle", "user", "system", "irq"],
+ "system.network.io": ["trasmit", "receive"],
+ "runtime.memory": ["rss", "vms"],
+ "runtime.cpu.time": ["user", "system"],
}
SystemMetrics(exporter, config=configuration)
@@ -57,16 +72,23 @@
import gc
import os
import typing
+from platform import python_implementation
import psutil
from opentelemetry import metrics
-from opentelemetry.sdk.metrics import ValueObserver
+from opentelemetry.sdk.metrics import (
+ SumObserver,
+ UpDownSumObserver,
+ ValueObserver,
+)
from opentelemetry.sdk.metrics.export import MetricsExporter
from opentelemetry.sdk.metrics.export.controller import PushController
+from opentelemetry.sdk.util import get_dict_as_key
class SystemMetrics:
+ # pylint: disable=too-many-statements
def __init__(
self,
exporter: MetricsExporter,
@@ -79,157 +101,591 @@ def __init__(
self.controller = PushController(
meter=self.meter, exporter=exporter, interval=interval
)
+ self._python_implementation = python_implementation().lower()
if config is None:
self._config = {
- "system_memory": ["total", "available", "used", "free"],
- "system_cpu": ["user", "system", "idle"],
- "network_bytes": ["bytes_recv", "bytes_sent"],
- "runtime_memory": ["rss", "vms"],
- "runtime_cpu": ["user", "system"],
+ "system.cpu.time": ["idle", "user", "system", "irq"],
+ "system.cpu.utilization": ["idle", "user", "system", "irq"],
+ "system.memory.usage": ["used", "free", "cached"],
+ "system.memory.utilization": ["used", "free", "cached"],
+ "system.swap.usage": ["used", "free"],
+ "system.swap.utilization": ["used", "free"],
+ # system.swap.page.faults: [],
+ # system.swap.page.operations: [],
+ "system.disk.io": ["read", "write"],
+ "system.disk.operations": ["read", "write"],
+ "system.disk.time": ["read", "write"],
+ "system.disk.merged": ["read", "write"],
+ # "system.filesystem.usage": [],
+ # "system.filesystem.utilization": [],
+ "system.network.dropped.packets": ["transmit", "receive"],
+ "system.network.packets": ["transmit", "receive"],
+ "system.network.errors": ["transmit", "receive"],
+ "system.network.io": ["trasmit", "receive"],
+ "system.network.connections": ["family", "type"],
+ "runtime.memory": ["rss", "vms"],
+ "runtime.cpu.time": ["user", "system"],
}
else:
self._config = config
+
self._proc = psutil.Process(os.getpid())
- self._system_memory_labels = {}
- self._system_cpu_labels = {}
- self._network_bytes_labels = {}
- self._runtime_memory_labels = {}
- self._runtime_cpu_labels = {}
- self._runtime_gc_labels = {}
- # create the label set for each observer once
- for key, value in self._labels.items():
- self._system_memory_labels[key] = value
- self._system_cpu_labels[key] = value
- self._network_bytes_labels[key] = value
- self._runtime_memory_labels[key] = value
- self._runtime_gc_labels[key] = value
-
- self.meter.register_observer(
- callback=self._get_system_memory,
- name="system.mem",
- description="System memory",
+
+ self._system_cpu_time_labels = self._labels.copy()
+ self._system_cpu_utilization_labels = self._labels.copy()
+
+ self._system_memory_usage_labels = self._labels.copy()
+ self._system_memory_utilization_labels = self._labels.copy()
+
+ self._system_swap_usage_labels = self._labels.copy()
+ self._system_swap_utilization_labels = self._labels.copy()
+ # self._system_swap_page_faults = self._labels.copy()
+ # self._system_swap_page_operations = self._labels.copy()
+
+ self._system_disk_io_labels = self._labels.copy()
+ self._system_disk_operations_labels = self._labels.copy()
+ self._system_disk_time_labels = self._labels.copy()
+ self._system_disk_merged_labels = self._labels.copy()
+
+ # self._system_filesystem_usage_labels = self._labels.copy()
+ # self._system_filesystem_utilization_labels = self._labels.copy()
+
+ self._system_network_dropped_packets_labels = self._labels.copy()
+ self._system_network_packets_labels = self._labels.copy()
+ self._system_network_errors_labels = self._labels.copy()
+ self._system_network_io_labels = self._labels.copy()
+ self._system_network_connections_labels = self._labels.copy()
+
+ self._runtime_memory_labels = self._labels.copy()
+ self._runtime_cpu_time_labels = self._labels.copy()
+ self._runtime_gc_count_labels = self._labels.copy()
+
+ self.meter.register_observer(
+ callback=self._get_system_cpu_time,
+ name="system.cpu.time",
+ description="System CPU time",
+ unit="seconds",
+ value_type=float,
+ observer_type=SumObserver,
+ )
+
+ self.meter.register_observer(
+ callback=self._get_system_cpu_utilization,
+ name="system.cpu.utilization",
+ description="System CPU utilization",
+ unit="1",
+ value_type=float,
+ observer_type=ValueObserver,
+ )
+
+ self.meter.register_observer(
+ callback=self._get_system_memory_usage,
+ name="system.memory.usage",
+ description="System memory usage",
unit="bytes",
value_type=int,
observer_type=ValueObserver,
)
self.meter.register_observer(
- callback=self._get_system_cpu,
- name="system.cpu",
- description="System CPU",
- unit="seconds",
+ callback=self._get_system_memory_utilization,
+ name="system.memory.utilization",
+ description="System memory utilization",
+ unit="1",
value_type=float,
observer_type=ValueObserver,
)
self.meter.register_observer(
- callback=self._get_network_bytes,
- name="system.net.bytes",
- description="System network bytes",
- unit="bytes",
+ callback=self._get_system_swap_usage,
+ name="system.swap.usage",
+ description="System swap usage",
+ unit="pages",
value_type=int,
observer_type=ValueObserver,
)
+ self.meter.register_observer(
+ callback=self._get_system_swap_utilization,
+ name="system.swap.utilization",
+ description="System swap utilization",
+ unit="1",
+ value_type=float,
+ observer_type=ValueObserver,
+ )
+
+ # self.meter.register_observer(
+ # callback=self._get_system_swap_page_faults,
+ # name="system.swap.page_faults",
+ # description="System swap page faults",
+ # unit="faults",
+ # value_type=int,
+ # observer_type=SumObserver,
+ # )
+
+ # self.meter.register_observer(
+ # callback=self._get_system_swap_page_operations,
+ # name="system.swap.page_operations",
+ # description="System swap page operations",
+ # unit="operations",
+ # value_type=int,
+ # observer_type=SumObserver,
+ # )
+
+ self.meter.register_observer(
+ callback=self._get_system_disk_io,
+ name="system.disk.io",
+ description="System disk IO",
+ unit="bytes",
+ value_type=int,
+ observer_type=SumObserver,
+ )
+
+ self.meter.register_observer(
+ callback=self._get_system_disk_operations,
+ name="system.disk.operations",
+ description="System disk operations",
+ unit="operations",
+ value_type=int,
+ observer_type=SumObserver,
+ )
+
+ self.meter.register_observer(
+ callback=self._get_system_disk_time,
+ name="system.disk.time",
+ description="System disk time",
+ unit="seconds",
+ value_type=float,
+ observer_type=SumObserver,
+ )
+
+ self.meter.register_observer(
+ callback=self._get_system_disk_merged,
+ name="system.disk.merged",
+ description="System disk merged",
+ unit="1",
+ value_type=int,
+ observer_type=SumObserver,
+ )
+
+ # self.meter.register_observer(
+ # callback=self._get_system_filesystem_usage,
+ # name="system.filesystem.usage",
+ # description="System filesystem usage",
+ # unit="bytes",
+ # value_type=int,
+ # observer_type=ValueObserver,
+ # )
+
+ # self.meter.register_observer(
+ # callback=self._get_system_filesystem_utilization,
+ # name="system.filesystem.utilization",
+ # description="System filesystem utilization",
+ # unit="1",
+ # value_type=float,
+ # observer_type=ValueObserver,
+ # )
+
+ self.meter.register_observer(
+ callback=self._get_system_network_dropped_packets,
+ name="system.network.dropped_packets",
+ description="System network dropped_packets",
+ unit="packets",
+ value_type=int,
+ observer_type=SumObserver,
+ )
+
+ self.meter.register_observer(
+ callback=self._get_system_network_packets,
+ name="system.network.packets",
+ description="System network packets",
+ unit="packets",
+ value_type=int,
+ observer_type=SumObserver,
+ )
+
+ self.meter.register_observer(
+ callback=self._get_system_network_errors,
+ name="system.network.errors",
+ description="System network errors",
+ unit="errors",
+ value_type=int,
+ observer_type=SumObserver,
+ )
+
+ self.meter.register_observer(
+ callback=self._get_system_network_io,
+ name="system.network.io",
+ description="System network io",
+ unit="bytes",
+ value_type=int,
+ observer_type=SumObserver,
+ )
+
+ self.meter.register_observer(
+ callback=self._get_system_network_connections,
+ name="system.network.connections",
+ description="System network connections",
+ unit="connections",
+ value_type=int,
+ observer_type=UpDownSumObserver,
+ )
+
self.meter.register_observer(
callback=self._get_runtime_memory,
- name="runtime.python.mem",
- description="Runtime memory",
+ name="runtime.{}.memory".format(self._python_implementation),
+ description="Runtime {} memory".format(
+ self._python_implementation
+ ),
unit="bytes",
value_type=int,
- observer_type=ValueObserver,
+ observer_type=SumObserver,
)
self.meter.register_observer(
- callback=self._get_runtime_cpu,
- name="runtime.python.cpu",
- description="Runtime CPU",
+ callback=self._get_runtime_cpu_time,
+ name="runtime.{}.cpu_time".format(self._python_implementation),
+ description="Runtime {} CPU time".format(
+ self._python_implementation
+ ),
unit="seconds",
value_type=float,
- observer_type=ValueObserver,
+ observer_type=SumObserver,
)
self.meter.register_observer(
callback=self._get_runtime_gc_count,
- name="runtime.python.gc.count",
- description="Runtime: gc objects",
- unit="objects",
+ name="runtime.{}.gc_count".format(self._python_implementation),
+ description="Runtime {} GC count".format(
+ self._python_implementation
+ ),
+ unit="bytes",
value_type=int,
- observer_type=ValueObserver,
+ observer_type=SumObserver,
)
- def _get_system_memory(self, observer: metrics.ValueObserver) -> None:
- """Observer callback for memory available
+ def _get_system_cpu_time(self, observer: metrics.ValueObserver) -> None:
+ """Observer callback for system CPU time
+
+ Args:
+ observer: the observer to update
+ """
+ for cpu, times in enumerate(psutil.cpu_times(percpu=True)):
+ for metric in self._config["system.cpu.time"]:
+ self._system_cpu_time_labels["state"] = metric
+ self._system_cpu_time_labels["cpu"] = cpu + 1
+ observer.observe(
+ getattr(times, metric), self._system_cpu_time_labels
+ )
+
+ def _get_system_cpu_utilization(
+ self, observer: metrics.ValueObserver
+ ) -> None:
+ """Observer callback for system CPU utilization
+
+ Args:
+ observer: the observer to update
+ """
+
+ for cpu, times_percent in enumerate(
+ psutil.cpu_times_percent(percpu=True)
+ ):
+ for metric in self._config["system.cpu.utilization"]:
+ self._system_cpu_utilization_labels["state"] = metric
+ self._system_cpu_utilization_labels["cpu"] = cpu + 1
+ observer.observe(
+ getattr(times_percent, metric) / 100,
+ self._system_cpu_utilization_labels,
+ )
+
+ def _get_system_memory_usage(
+ self, observer: metrics.ValueObserver
+ ) -> None:
+ """Observer callback for memory usage
+
+ Args:
+ observer: the observer to update
+ """
+ virtual_memory = psutil.virtual_memory()
+ for metric in self._config["system.memory.usage"]:
+ self._system_memory_usage_labels["state"] = metric
+ observer.observe(
+ getattr(virtual_memory, metric),
+ self._system_memory_usage_labels,
+ )
+
+ def _get_system_memory_utilization(
+ self, observer: metrics.ValueObserver
+ ) -> None:
+ """Observer callback for memory utilization
Args:
observer: the observer to update
"""
system_memory = psutil.virtual_memory()
- for metric in self._config["system_memory"]:
- self._system_memory_labels["type"] = metric
+
+ for metric in self._config["system.memory.utilization"]:
+ self._system_memory_utilization_labels["state"] = metric
+ observer.observe(
+ getattr(system_memory, metric) / system_memory.total,
+ self._system_memory_utilization_labels,
+ )
+
+ def _get_system_swap_usage(self, observer: metrics.ValueObserver) -> None:
+ """Observer callback for swap usage
+
+ Args:
+ observer: the observer to update
+ """
+ system_swap = psutil.swap_memory()
+
+ for metric in self._config["system.swap.usage"]:
+ self._system_swap_usage_labels["state"] = metric
observer.observe(
- getattr(system_memory, metric), self._system_memory_labels
+ getattr(system_swap, metric), self._system_swap_usage_labels
)
- def _get_system_cpu(self, observer: metrics.ValueObserver) -> None:
- """Observer callback for system cpu
+ def _get_system_swap_utilization(
+ self, observer: metrics.ValueObserver
+ ) -> None:
+ """Observer callback for swap utilization
Args:
observer: the observer to update
"""
- cpu_times = psutil.cpu_times()
- for _type in self._config["system_cpu"]:
- self._system_cpu_labels["type"] = _type
+ system_swap = psutil.swap_memory()
+
+ for metric in self._config["system.swap.utilization"]:
+ self._system_swap_utilization_labels["state"] = metric
observer.observe(
- getattr(cpu_times, _type), self._system_cpu_labels
+ getattr(system_swap, metric) / system_swap.total,
+ self._system_swap_utilization_labels,
)
- def _get_network_bytes(self, observer: metrics.ValueObserver) -> None:
- """Observer callback for network bytes
+ # TODO Add _get_system_swap_page_faults
+ # TODO Add _get_system_swap_page_operations
+
+ def _get_system_disk_io(self, observer: metrics.SumObserver) -> None:
+ """Observer callback for disk IO
Args:
observer: the observer to update
"""
- net_io = psutil.net_io_counters()
- for _type in self._config["network_bytes"]:
- self._network_bytes_labels["type"] = _type
+ for device, counters in psutil.disk_io_counters(perdisk=True).items():
+ for metric in self._config["system.disk.io"]:
+ self._system_disk_io_labels["device"] = device
+ self._system_disk_io_labels["direction"] = metric
+ observer.observe(
+ getattr(counters, "{}_bytes".format(metric)),
+ self._system_disk_io_labels,
+ )
+
+ def _get_system_disk_operations(
+ self, observer: metrics.SumObserver
+ ) -> None:
+ """Observer callback for disk operations
+
+ Args:
+ observer: the observer to update
+ """
+ for device, counters in psutil.disk_io_counters(perdisk=True).items():
+ for metric in self._config["system.disk.operations"]:
+ self._system_disk_operations_labels["device"] = device
+ self._system_disk_operations_labels["direction"] = metric
+ observer.observe(
+ getattr(counters, "{}_count".format(metric)),
+ self._system_disk_operations_labels,
+ )
+
+ def _get_system_disk_time(self, observer: metrics.SumObserver) -> None:
+ """Observer callback for disk time
+
+ Args:
+ observer: the observer to update
+ """
+ for device, counters in psutil.disk_io_counters(perdisk=True).items():
+ for metric in self._config["system.disk.time"]:
+ self._system_disk_time_labels["device"] = device
+ self._system_disk_time_labels["direction"] = metric
+ observer.observe(
+ getattr(counters, "{}_time".format(metric)) / 1000,
+ self._system_disk_time_labels,
+ )
+
+ def _get_system_disk_merged(self, observer: metrics.SumObserver) -> None:
+ """Observer callback for disk merged operations
+
+ Args:
+ observer: the observer to update
+ """
+
+ # FIXME The units in the spec is 1, it seems like it should be
+ # operations or the value type should be Double
+
+ for device, counters in psutil.disk_io_counters(perdisk=True).items():
+ for metric in self._config["system.disk.time"]:
+ self._system_disk_merged_labels["device"] = device
+ self._system_disk_merged_labels["direction"] = metric
+ observer.observe(
+ getattr(counters, "{}_merged_count".format(metric)),
+ self._system_disk_merged_labels,
+ )
+
+ # TODO Add _get_system_filesystem_usage
+ # TODO Add _get_system_filesystem_utilization
+ # TODO Filesystem information can be obtained with os.statvfs in Unix-like
+ # OSs, how to do the same in Windows?
+
+ def _get_system_network_dropped_packets(
+ self, observer: metrics.SumObserver
+ ) -> None:
+ """Observer callback for network dropped packets
+
+ Args:
+ observer: the observer to update
+ """
+
+ for device, counters in psutil.net_io_counters(pernic=True).items():
+ for metric in self._config["system.network.dropped.packets"]:
+ in_out = {"receive": "in", "transmit": "out"}[metric]
+ self._system_network_dropped_packets_labels["device"] = device
+ self._system_network_dropped_packets_labels[
+ "direction"
+ ] = metric
+ observer.observe(
+ getattr(counters, "drop{}".format(in_out)),
+ self._system_network_dropped_packets_labels,
+ )
+
+ def _get_system_network_packets(
+ self, observer: metrics.SumObserver
+ ) -> None:
+ """Observer callback for network packets
+
+ Args:
+ observer: the observer to update
+ """
+
+ for device, counters in psutil.net_io_counters(pernic=True).items():
+ for metric in self._config["system.network.dropped.packets"]:
+ recv_sent = {"receive": "recv", "transmit": "sent"}[metric]
+ self._system_network_packets_labels["device"] = device
+ self._system_network_packets_labels["direction"] = metric
+ observer.observe(
+ getattr(counters, "packets_{}".format(recv_sent)),
+ self._system_network_packets_labels,
+ )
+
+ def _get_system_network_errors(
+ self, observer: metrics.SumObserver
+ ) -> None:
+ """Observer callback for network errors
+
+ Args:
+ observer: the observer to update
+ """
+ for device, counters in psutil.net_io_counters(pernic=True).items():
+ for metric in self._config["system.network.errors"]:
+ in_out = {"receive": "in", "transmit": "out"}[metric]
+ self._system_network_errors_labels["device"] = device
+ self._system_network_errors_labels["direction"] = metric
+ observer.observe(
+ getattr(counters, "err{}".format(in_out)),
+ self._system_network_errors_labels,
+ )
+
+ def _get_system_network_io(self, observer: metrics.SumObserver) -> None:
+ """Observer callback for network IO
+
+ Args:
+ observer: the observer to update
+ """
+
+ for device, counters in psutil.net_io_counters(pernic=True).items():
+ for metric in self._config["system.network.dropped.packets"]:
+ recv_sent = {"receive": "recv", "transmit": "sent"}[metric]
+ self._system_network_io_labels["device"] = device
+ self._system_network_io_labels["direction"] = metric
+ observer.observe(
+ getattr(counters, "bytes_{}".format(recv_sent)),
+ self._system_network_io_labels,
+ )
+
+ def _get_system_network_connections(
+ self, observer: metrics.UpDownSumObserver
+ ) -> None:
+ """Observer callback for network connections
+
+ Args:
+ observer: the observer to update
+ """
+ # TODO How to find the device identifier for a particular
+ # connection?
+
+ connection_counters = {}
+
+ for net_connection in psutil.net_connections():
+ for metric in self._config["system.network.connections"]:
+ self._system_network_connections_labels["protocol"] = {
+ 1: "tcp",
+ 2: "udp",
+ }[net_connection.type.value]
+ self._system_network_connections_labels[
+ "state"
+ ] = net_connection.status
+ self._system_network_connections_labels[metric] = getattr(
+ net_connection, metric
+ )
+
+ connection_counters_key = get_dict_as_key(
+ self._system_network_connections_labels
+ )
+
+ if connection_counters_key in connection_counters.keys():
+ connection_counters[connection_counters_key]["counter"] += 1
+ else:
+ connection_counters[connection_counters_key] = {
+ "counter": 1,
+ "labels": self._system_network_connections_labels.copy(),
+ }
+
+ for connection_counter in connection_counters.values():
observer.observe(
- getattr(net_io, _type), self._network_bytes_labels
+ connection_counter["counter"], connection_counter["labels"],
)
- def _get_runtime_memory(self, observer: metrics.ValueObserver) -> None:
+ def _get_runtime_memory(self, observer: metrics.SumObserver) -> None:
"""Observer callback for runtime memory
Args:
observer: the observer to update
"""
proc_memory = self._proc.memory_info()
- for _type in self._config["runtime_memory"]:
- self._runtime_memory_labels["type"] = _type
+ for metric in self._config["runtime.memory"]:
+ self._runtime_memory_labels["type"] = metric
observer.observe(
- getattr(proc_memory, _type), self._runtime_memory_labels
+ getattr(proc_memory, metric), self._runtime_memory_labels,
)
- def _get_runtime_cpu(self, observer: metrics.ValueObserver) -> None:
- """Observer callback for runtime CPU
+ def _get_runtime_cpu_time(self, observer: metrics.SumObserver) -> None:
+ """Observer callback for runtime CPU time
Args:
observer: the observer to update
"""
proc_cpu = self._proc.cpu_times()
- for _type in self._config["runtime_cpu"]:
- self._runtime_cpu_labels["type"] = _type
+ for metric in self._config["runtime.cpu.time"]:
+ self._runtime_cpu_time_labels["type"] = metric
observer.observe(
- getattr(proc_cpu, _type), self._runtime_cpu_labels
+ getattr(proc_cpu, metric), self._runtime_cpu_time_labels,
)
- def _get_runtime_gc_count(self, observer: metrics.ValueObserver) -> None:
+ def _get_runtime_gc_count(self, observer: metrics.SumObserver) -> None:
"""Observer callback for garbage collection
Args:
observer: the observer to update
"""
- gc_count = gc.get_count()
- for index, count in enumerate(gc_count):
- self._runtime_gc_labels["count"] = str(index)
- observer.observe(count, self._runtime_gc_labels)
+ for index, count in enumerate(gc.get_count()):
+ self._runtime_gc_count_labels["count"] = str(index)
+ observer.observe(count, self._runtime_gc_count_labels)
diff --git a/instrumentation/opentelemetry-instrumentation-system-metrics/tests/test_system_metrics.py b/instrumentation/opentelemetry-instrumentation-system-metrics/tests/test_system_metrics.py
index b9ae662af19..2f155383f46 100644
--- a/instrumentation/opentelemetry-instrumentation-system-metrics/tests/test_system_metrics.py
+++ b/instrumentation/opentelemetry-instrumentation-system-metrics/tests/test_system_metrics.py
@@ -12,11 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# pylint: disable=protected-access
+
from collections import namedtuple
+from platform import python_implementation
from unittest import mock
from opentelemetry import metrics
from opentelemetry.instrumentation.system_metrics import SystemMetrics
+from opentelemetry.sdk.metrics.export.aggregate import ValueObserverAggregator
from opentelemetry.test.test_base import TestBase
@@ -24,6 +28,7 @@ class TestSystemMetrics(TestBase):
def setUp(self):
super().setUp()
self.memory_metrics_exporter.clear()
+ self.implementation = python_implementation().lower()
def test_system_metrics_constructor(self):
# ensure the observers have been registered
@@ -31,15 +36,30 @@ def test_system_metrics_constructor(self):
with mock.patch("opentelemetry.metrics.get_meter") as mock_get_meter:
mock_get_meter.return_value = meter
SystemMetrics(self.memory_metrics_exporter)
- self.assertEqual(len(meter.observers), 6)
+
+ self.assertEqual(len(meter.observers), 18)
+
observer_names = [
- "system.mem",
- "system.cpu",
- "system.net.bytes",
- "runtime.python.mem",
- "runtime.python.cpu",
- "runtime.python.gc.count",
+ "system.cpu.time",
+ "system.cpu.utilization",
+ "system.memory.usage",
+ "system.memory.utilization",
+ "system.swap.usage",
+ "system.swap.utilization",
+ "system.disk.io",
+ "system.disk.operations",
+ "system.disk.time",
+ "system.disk.merged",
+ "system.network.dropped_packets",
+ "system.network.packets",
+ "system.network.errors",
+ "system.network.io",
+ "system.network.connections",
+ "runtime.{}.memory".format(self.implementation),
+ "runtime.{}.cpu_time".format(self.implementation),
+ "runtime.{}.gc_count".format(self.implementation),
]
+
for observer in meter.observers:
self.assertIn(observer.name, observer_names)
observer_names.remove(observer.name)
@@ -57,7 +77,7 @@ def _assert_metrics(self, observer_name, system_metrics, expected):
and metric.instrument.name == observer_name
):
self.assertEqual(
- metric.aggregator.checkpoint.last, expected[metric.labels],
+ metric.aggregator.checkpoint, expected[metric.labels],
)
assertions += 1
self.assertEqual(len(expected), assertions)
@@ -70,134 +90,614 @@ def _test_metrics(self, observer_name, expected):
system_metrics = SystemMetrics(self.memory_metrics_exporter)
self._assert_metrics(observer_name, system_metrics, expected)
+ # When this test case is executed, _get_system_cpu_utilization gets run
+ # too because of the controller thread which runs all observers. This patch
+ # is added here to stop a warning that would otherwise be raised.
+ # pylint: disable=unused-argument
+ @mock.patch("psutil.cpu_times_percent")
@mock.patch("psutil.cpu_times")
- def test_system_cpu(self, mock_cpu_times):
- CPUTimes = namedtuple("CPUTimes", ["user", "nice", "system", "idle"])
- mock_cpu_times.return_value = CPUTimes(
- user=332277.48, nice=0.0, system=309836.43, idle=6724698.94
+ def test_system_cpu_time(self, mock_cpu_times, mock_cpu_times_percent):
+ CPUTimes = namedtuple("CPUTimes", ["idle", "user", "system", "irq"])
+ mock_cpu_times.return_value = [
+ CPUTimes(idle=1.2, user=3.4, system=5.6, irq=7.8),
+ CPUTimes(idle=1.2, user=3.4, system=5.6, irq=7.8),
+ ]
+
+ expected = {
+ (("cpu", 1), ("state", "idle"),): 1.2,
+ (("cpu", 1), ("state", "user"),): 3.4,
+ (("cpu", 1), ("state", "system"),): 5.6,
+ (("cpu", 1), ("state", "irq"),): 7.8,
+ (("cpu", 2), ("state", "idle"),): 1.2,
+ (("cpu", 2), ("state", "user"),): 3.4,
+ (("cpu", 2), ("state", "system"),): 5.6,
+ (("cpu", 2), ("state", "irq"),): 7.8,
+ }
+ self._test_metrics("system.cpu.time", expected)
+
+ @mock.patch("psutil.cpu_times_percent")
+ def test_system_cpu_utilization(self, mock_cpu_times_percent):
+ CPUTimesPercent = namedtuple(
+ "CPUTimesPercent", ["idle", "user", "system", "irq"]
)
+ mock_cpu_times_percent.return_value = [
+ CPUTimesPercent(idle=1.2, user=3.4, system=5.6, irq=7.8),
+ CPUTimesPercent(idle=1.2, user=3.4, system=5.6, irq=7.8),
+ ]
expected = {
- (("type", "user"),): 332277.48,
- (("type", "system"),): 309836.43,
- (("type", "idle"),): 6724698.94,
+ (("cpu", 1), ("state", "idle"),): ValueObserverAggregator._TYPE(
+ min=1.2 / 100,
+ max=1.2 / 100,
+ sum=1.2 / 100,
+ count=1,
+ last=1.2 / 100,
+ ),
+ (("cpu", 1), ("state", "user"),): ValueObserverAggregator._TYPE(
+ min=3.4 / 100,
+ max=3.4 / 100,
+ sum=3.4 / 100,
+ count=1,
+ last=3.4 / 100,
+ ),
+ (("cpu", 1), ("state", "system"),): ValueObserverAggregator._TYPE(
+ min=5.6 / 100,
+ max=5.6 / 100,
+ sum=5.6 / 100,
+ count=1,
+ last=5.6 / 100,
+ ),
+ (("cpu", 1), ("state", "irq"),): ValueObserverAggregator._TYPE(
+ min=7.8 / 100,
+ max=7.8 / 100,
+ sum=7.8 / 100,
+ count=1,
+ last=7.8 / 100,
+ ),
+ (("cpu", 2), ("state", "idle"),): ValueObserverAggregator._TYPE(
+ min=1.2 / 100,
+ max=1.2 / 100,
+ sum=1.2 / 100,
+ count=1,
+ last=1.2 / 100,
+ ),
+ (("cpu", 2), ("state", "user"),): ValueObserverAggregator._TYPE(
+ min=3.4 / 100,
+ max=3.4 / 100,
+ sum=3.4 / 100,
+ count=1,
+ last=3.4 / 100,
+ ),
+ (("cpu", 2), ("state", "system"),): ValueObserverAggregator._TYPE(
+ min=5.6 / 100,
+ max=5.6 / 100,
+ sum=5.6 / 100,
+ count=1,
+ last=5.6 / 100,
+ ),
+ (("cpu", 2), ("state", "irq"),): ValueObserverAggregator._TYPE(
+ min=7.8 / 100,
+ max=7.8 / 100,
+ sum=7.8 / 100,
+ count=1,
+ last=7.8 / 100,
+ ),
}
- self._test_metrics("system.cpu", expected)
+ self._test_metrics("system.cpu.utilization", expected)
@mock.patch("psutil.virtual_memory")
- def test_system_memory(self, mock_virtual_memory):
+ def test_system_memory_usage(self, mock_virtual_memory):
VirtualMemory = namedtuple(
- "VirtualMemory",
+ "VirtualMemory", ["used", "free", "cached", "total"]
+ )
+ mock_virtual_memory.return_value = VirtualMemory(
+ used=1, free=2, cached=3, total=4
+ )
+
+ expected = {
+ (("state", "used"),): ValueObserverAggregator._TYPE(
+ min=1, max=1, sum=1, count=1, last=1
+ ),
+ (("state", "free"),): ValueObserverAggregator._TYPE(
+ min=2, max=2, sum=2, count=1, last=2
+ ),
+ (("state", "cached"),): ValueObserverAggregator._TYPE(
+ min=3, max=3, sum=3, count=1, last=3
+ ),
+ }
+ self._test_metrics("system.memory.usage", expected)
+
+ @mock.patch("psutil.virtual_memory")
+ def test_system_memory_utilization(self, mock_virtual_memory):
+ VirtualMemory = namedtuple(
+ "VirtualMemory", ["used", "free", "cached", "total"]
+ )
+ mock_virtual_memory.return_value = VirtualMemory(
+ used=1, free=2, cached=3, total=4
+ )
+
+ expected = {
+ (("state", "used"),): ValueObserverAggregator._TYPE(
+ min=1 / 4, max=1 / 4, sum=1 / 4, count=1, last=1 / 4
+ ),
+ (("state", "free"),): ValueObserverAggregator._TYPE(
+ min=2 / 4, max=2 / 4, sum=2 / 4, count=1, last=2 / 4
+ ),
+ (("state", "cached"),): ValueObserverAggregator._TYPE(
+ min=3 / 4, max=3 / 4, sum=3 / 4, count=1, last=3 / 4
+ ),
+ }
+ self._test_metrics("system.memory.utilization", expected)
+
+ @mock.patch("psutil.swap_memory")
+ def test_system_swap_usage(self, mock_swap_memory):
+ SwapMemory = namedtuple("SwapMemory", ["used", "free", "total"])
+ mock_swap_memory.return_value = SwapMemory(used=1, free=2, total=3)
+
+ expected = {
+ (("state", "used"),): ValueObserverAggregator._TYPE(
+ min=1, max=1, sum=1, count=1, last=1
+ ),
+ (("state", "free"),): ValueObserverAggregator._TYPE(
+ min=2, max=2, sum=2, count=1, last=2
+ ),
+ }
+ self._test_metrics("system.swap.usage", expected)
+
+ @mock.patch("psutil.swap_memory")
+ def test_system_swap_utilization(self, mock_swap_memory):
+ SwapMemory = namedtuple("SwapMemory", ["used", "free", "total"])
+ mock_swap_memory.return_value = SwapMemory(used=1, free=2, total=3)
+
+ expected = {
+ (("state", "used"),): ValueObserverAggregator._TYPE(
+ min=1 / 3, max=1 / 3, sum=1 / 3, count=1, last=1 / 3
+ ),
+ (("state", "free"),): ValueObserverAggregator._TYPE(
+ min=2 / 3, max=2 / 3, sum=2 / 3, count=1, last=2 / 3
+ ),
+ }
+ self._test_metrics("system.swap.utilization", expected)
+
+ @mock.patch("psutil.disk_io_counters")
+ def test_system_disk_io(self, mock_disk_io_counters):
+ DiskIO = namedtuple(
+ "DiskIO",
[
- "total",
- "available",
- "percent",
- "used",
- "free",
- "active",
- "inactive",
- "wired",
+ "read_count",
+ "write_count",
+ "read_bytes",
+ "write_bytes",
+ "read_time",
+ "write_time",
+ "read_merged_count",
+ "write_merged_count",
],
)
- mock_virtual_memory.return_value = VirtualMemory(
- total=17179869184,
- available=5520928768,
- percent=67.9,
- used=10263990272,
- free=266964992,
- active=5282459648,
- inactive=5148700672,
- wired=4981530624,
+ mock_disk_io_counters.return_value = {
+ "sda": DiskIO(
+ read_count=1,
+ write_count=2,
+ read_bytes=3,
+ write_bytes=4,
+ read_time=5,
+ write_time=6,
+ read_merged_count=7,
+ write_merged_count=8,
+ ),
+ "sdb": DiskIO(
+ read_count=9,
+ write_count=10,
+ read_bytes=11,
+ write_bytes=12,
+ read_time=13,
+ write_time=14,
+ read_merged_count=15,
+ write_merged_count=16,
+ ),
+ }
+
+ expected = {
+ (("device", "sda"), ("direction", "read"),): 3,
+ (("device", "sda"), ("direction", "write"),): 4,
+ (("device", "sdb"), ("direction", "read"),): 11,
+ (("device", "sdb"), ("direction", "write"),): 12,
+ }
+ self._test_metrics("system.disk.io", expected)
+
+ @mock.patch("psutil.disk_io_counters")
+ def test_system_disk_operations(self, mock_disk_io_counters):
+ DiskIO = namedtuple(
+ "DiskIO",
+ [
+ "read_count",
+ "write_count",
+ "read_bytes",
+ "write_bytes",
+ "read_time",
+ "write_time",
+ "read_merged_count",
+ "write_merged_count",
+ ],
+ )
+ mock_disk_io_counters.return_value = {
+ "sda": DiskIO(
+ read_count=1,
+ write_count=2,
+ read_bytes=3,
+ write_bytes=4,
+ read_time=5,
+ write_time=6,
+ read_merged_count=7,
+ write_merged_count=8,
+ ),
+ "sdb": DiskIO(
+ read_count=9,
+ write_count=10,
+ read_bytes=11,
+ write_bytes=12,
+ read_time=13,
+ write_time=14,
+ read_merged_count=15,
+ write_merged_count=16,
+ ),
+ }
+
+ expected = {
+ (("device", "sda"), ("direction", "read"),): 1,
+ (("device", "sda"), ("direction", "write"),): 2,
+ (("device", "sdb"), ("direction", "read"),): 9,
+ (("device", "sdb"), ("direction", "write"),): 10,
+ }
+ self._test_metrics("system.disk.operations", expected)
+
+ @mock.patch("psutil.disk_io_counters")
+ def test_system_disk_time(self, mock_disk_io_counters):
+ DiskIO = namedtuple(
+ "DiskIO",
+ [
+ "read_count",
+ "write_count",
+ "read_bytes",
+ "write_bytes",
+ "read_time",
+ "write_time",
+ "read_merged_count",
+ "write_merged_count",
+ ],
)
+ mock_disk_io_counters.return_value = {
+ "sda": DiskIO(
+ read_count=1,
+ write_count=2,
+ read_bytes=3,
+ write_bytes=4,
+ read_time=5,
+ write_time=6,
+ read_merged_count=7,
+ write_merged_count=8,
+ ),
+ "sdb": DiskIO(
+ read_count=9,
+ write_count=10,
+ read_bytes=11,
+ write_bytes=12,
+ read_time=13,
+ write_time=14,
+ read_merged_count=15,
+ write_merged_count=16,
+ ),
+ }
expected = {
- (("type", "total"),): 17179869184,
- (("type", "used"),): 10263990272,
- (("type", "available"),): 5520928768,
- (("type", "free"),): 266964992,
+ (("device", "sda"), ("direction", "read"),): 5 / 1000,
+ (("device", "sda"), ("direction", "write"),): 6 / 1000,
+ (("device", "sdb"), ("direction", "read"),): 13 / 1000,
+ (("device", "sdb"), ("direction", "write"),): 14 / 1000,
}
- self._test_metrics("system.mem", expected)
+ self._test_metrics("system.disk.time", expected)
+
+ @mock.patch("psutil.disk_io_counters")
+ def test_system_disk_merged(self, mock_disk_io_counters):
+ DiskIO = namedtuple(
+ "DiskIO",
+ [
+ "read_count",
+ "write_count",
+ "read_bytes",
+ "write_bytes",
+ "read_time",
+ "write_time",
+ "read_merged_count",
+ "write_merged_count",
+ ],
+ )
+ mock_disk_io_counters.return_value = {
+ "sda": DiskIO(
+ read_count=1,
+ write_count=2,
+ read_bytes=3,
+ write_bytes=4,
+ read_time=5,
+ write_time=6,
+ read_merged_count=7,
+ write_merged_count=8,
+ ),
+ "sdb": DiskIO(
+ read_count=9,
+ write_count=10,
+ read_bytes=11,
+ write_bytes=12,
+ read_time=13,
+ write_time=14,
+ read_merged_count=15,
+ write_merged_count=16,
+ ),
+ }
+
+ expected = {
+ (("device", "sda"), ("direction", "read"),): 7,
+ (("device", "sda"), ("direction", "write"),): 8,
+ (("device", "sdb"), ("direction", "read"),): 15,
+ (("device", "sdb"), ("direction", "write"),): 16,
+ }
+ self._test_metrics("system.disk.merged", expected)
@mock.patch("psutil.net_io_counters")
- def test_network_bytes(self, mock_net_io_counters):
- NetworkIO = namedtuple(
- "NetworkIO",
- ["bytes_sent", "bytes_recv", "packets_recv", "packets_sent"],
+ def test_system_network_dropped_packets(self, mock_net_io_counters):
+ NetIO = namedtuple(
+ "NetIO",
+ [
+ "dropin",
+ "dropout",
+ "packets_sent",
+ "packets_recv",
+ "errin",
+ "errout",
+ "bytes_sent",
+ "bytes_recv",
+ ],
)
- mock_net_io_counters.return_value = NetworkIO(
- bytes_sent=23920188416,
- bytes_recv=46798894080,
- packets_sent=53127118,
- packets_recv=53205738,
+ mock_net_io_counters.return_value = {
+ "eth0": NetIO(
+ dropin=1,
+ dropout=2,
+ packets_sent=3,
+ packets_recv=4,
+ errin=5,
+ errout=6,
+ bytes_sent=7,
+ bytes_recv=8,
+ ),
+ "eth1": NetIO(
+ dropin=9,
+ dropout=10,
+ packets_sent=11,
+ packets_recv=12,
+ errin=13,
+ errout=14,
+ bytes_sent=15,
+ bytes_recv=16,
+ ),
+ }
+
+ expected = {
+ (("device", "eth0"), ("direction", "receive"),): 1,
+ (("device", "eth0"), ("direction", "transmit"),): 2,
+ (("device", "eth1"), ("direction", "receive"),): 9,
+ (("device", "eth1"), ("direction", "transmit"),): 10,
+ }
+ self._test_metrics("system.network.dropped_packets", expected)
+
+ @mock.patch("psutil.net_io_counters")
+ def test_system_network_packets(self, mock_net_io_counters):
+ NetIO = namedtuple(
+ "NetIO",
+ [
+ "dropin",
+ "dropout",
+ "packets_sent",
+ "packets_recv",
+ "errin",
+ "errout",
+ "bytes_sent",
+ "bytes_recv",
+ ],
)
+ mock_net_io_counters.return_value = {
+ "eth0": NetIO(
+ dropin=1,
+ dropout=2,
+ packets_sent=3,
+ packets_recv=4,
+ errin=5,
+ errout=6,
+ bytes_sent=7,
+ bytes_recv=8,
+ ),
+ "eth1": NetIO(
+ dropin=9,
+ dropout=10,
+ packets_sent=11,
+ packets_recv=12,
+ errin=13,
+ errout=14,
+ bytes_sent=15,
+ bytes_recv=16,
+ ),
+ }
expected = {
- (("type", "bytes_recv"),): 46798894080,
- (("type", "bytes_sent"),): 23920188416,
+ (("device", "eth0"), ("direction", "receive"),): 4,
+ (("device", "eth0"), ("direction", "transmit"),): 3,
+ (("device", "eth1"), ("direction", "receive"),): 12,
+ (("device", "eth1"), ("direction", "transmit"),): 11,
}
- self._test_metrics("system.net.bytes", expected)
+ self._test_metrics("system.network.packets", expected)
- def test_runtime_memory(self):
- meter = self.meter_provider.get_meter(__name__)
- with mock.patch("opentelemetry.metrics.get_meter") as mock_get_meter:
- mock_get_meter.return_value = meter
- system_metrics = SystemMetrics(self.memory_metrics_exporter)
+ @mock.patch("psutil.net_io_counters")
+ def test_system_network_errors(self, mock_net_io_counters):
+ NetIO = namedtuple(
+ "NetIO",
+ [
+ "dropin",
+ "dropout",
+ "packets_sent",
+ "packets_recv",
+ "errin",
+ "errout",
+ "bytes_sent",
+ "bytes_recv",
+ ],
+ )
+ mock_net_io_counters.return_value = {
+ "eth0": NetIO(
+ dropin=1,
+ dropout=2,
+ packets_sent=3,
+ packets_recv=4,
+ errin=5,
+ errout=6,
+ bytes_sent=7,
+ bytes_recv=8,
+ ),
+ "eth1": NetIO(
+ dropin=9,
+ dropout=10,
+ packets_sent=11,
+ packets_recv=12,
+ errin=13,
+ errout=14,
+ bytes_sent=15,
+ bytes_recv=16,
+ ),
+ }
- with mock.patch.object(
- system_metrics._proc, # pylint: disable=protected-access
- "memory_info",
- ) as mock_runtime_memory:
- RuntimeMemory = namedtuple(
- "RuntimeMemory", ["rss", "vms", "pfaults", "pageins"],
- )
- mock_runtime_memory.return_value = RuntimeMemory(
- rss=9777152, vms=4385665024, pfaults=2631, pageins=49
- )
- expected = {
- (("type", "rss"),): 9777152,
- (("type", "vms"),): 4385665024,
- }
- self._assert_metrics(
- "runtime.python.mem", system_metrics, expected
- )
+ expected = {
+ (("device", "eth0"), ("direction", "receive"),): 5,
+ (("device", "eth0"), ("direction", "transmit"),): 6,
+ (("device", "eth1"), ("direction", "receive"),): 13,
+ (("device", "eth1"), ("direction", "transmit"),): 14,
+ }
+ self._test_metrics("system.network.errors", expected)
- def test_runtime_cpu(self):
- meter = self.meter_provider.get_meter(__name__)
- with mock.patch("opentelemetry.metrics.get_meter") as mock_get_meter:
- mock_get_meter.return_value = meter
- system_metrics = SystemMetrics(self.memory_metrics_exporter)
+ @mock.patch("psutil.net_io_counters")
+ def test_system_network_io(self, mock_net_io_counters):
+ NetIO = namedtuple(
+ "NetIO",
+ [
+ "dropin",
+ "dropout",
+ "packets_sent",
+ "packets_recv",
+ "errin",
+ "errout",
+ "bytes_sent",
+ "bytes_recv",
+ ],
+ )
+ mock_net_io_counters.return_value = {
+ "eth0": NetIO(
+ dropin=1,
+ dropout=2,
+ packets_sent=3,
+ packets_recv=4,
+ errin=5,
+ errout=6,
+ bytes_sent=7,
+ bytes_recv=8,
+ ),
+ "eth1": NetIO(
+ dropin=9,
+ dropout=10,
+ packets_sent=11,
+ packets_recv=12,
+ errin=13,
+ errout=14,
+ bytes_sent=15,
+ bytes_recv=16,
+ ),
+ }
- with mock.patch.object(
- system_metrics._proc, # pylint: disable=protected-access
- "cpu_times",
- ) as mock_runtime_cpu_times:
- RuntimeCPU = namedtuple(
- "RuntimeCPU", ["user", "nice", "system"]
- )
- mock_runtime_cpu_times.return_value = RuntimeCPU(
- user=100.48, nice=0.0, system=200.43
- )
+ expected = {
+ (("device", "eth0"), ("direction", "receive"),): 8,
+ (("device", "eth0"), ("direction", "transmit"),): 7,
+ (("device", "eth1"), ("direction", "receive"),): 16,
+ (("device", "eth1"), ("direction", "transmit"),): 15,
+ }
+ self._test_metrics("system.network.io", expected)
- expected = {
- (("type", "user"),): 100.48,
- (("type", "system"),): 200.43,
- }
+ @mock.patch("psutil.net_connections")
+ def test_system_network_connections(self, mock_net_connections):
+ NetConnection = namedtuple(
+ "NetworkConnection", ["family", "type", "status"]
+ )
+ Type = namedtuple("Type", ["value"])
+ mock_net_connections.return_value = [
+ NetConnection(family=1, status="ESTABLISHED", type=Type(value=2),),
+ NetConnection(family=1, status="ESTABLISHED", type=Type(value=1),),
+ ]
- self._assert_metrics(
- "runtime.python.cpu", system_metrics, expected
- )
+ expected = {
+ (
+ ("family", 1),
+ ("protocol", "udp"),
+ ("state", "ESTABLISHED"),
+ ("type", Type(value=2)),
+ ): 1,
+ (
+ ("family", 1),
+ ("protocol", "tcp"),
+ ("state", "ESTABLISHED"),
+ ("type", Type(value=1)),
+ ): 1,
+ }
+ self._test_metrics("system.network.connections", expected)
+
+ @mock.patch("psutil.Process.memory_info")
+ def test_runtime_memory(self, mock_process_memory_info):
+
+ PMem = namedtuple("PMem", ["rss", "vms"])
+
+ mock_process_memory_info.configure_mock(
+ **{"return_value": PMem(rss=1, vms=2)}
+ )
+
+ expected = {
+ (("type", "rss"),): 1,
+ (("type", "vms"),): 2,
+ }
+ self._test_metrics(
+ "runtime.{}.memory".format(self.implementation), expected
+ )
+
+ @mock.patch("psutil.Process.cpu_times")
+ def test_runtime_cpu_time(self, mock_process_cpu_times):
+
+ PCPUTimes = namedtuple("PCPUTimes", ["user", "system"])
+
+ mock_process_cpu_times.configure_mock(
+ **{"return_value": PCPUTimes(user=1.1, system=2.2)}
+ )
+
+ expected = {
+ (("type", "user"),): 1.1,
+ (("type", "system"),): 2.2,
+ }
+ self._test_metrics(
+ "runtime.{}.cpu_time".format(self.implementation), expected
+ )
@mock.patch("gc.get_count")
- def test_runtime_gc_count(self, mock_gc):
- mock_gc.return_value = [
- 100, # gen0
- 50, # gen1
- 10, # gen2
- ]
+ def test_runtime_get_count(self, mock_gc_get_count):
+
+ mock_gc_get_count.configure_mock(**{"return_value": (1, 2, 3)})
expected = {
- (("count", "0"),): 100,
- (("count", "1"),): 50,
- (("count", "2"),): 10,
+ (("count", "0"),): 1,
+ (("count", "1"),): 2,
+ (("count", "2"),): 3,
}
- self._test_metrics("runtime.python.gc.count", expected)
+ self._test_metrics(
+ "runtime.{}.gc_count".format(self.implementation), expected
+ )
diff --git a/opentelemetry-api/CHANGELOG.md b/opentelemetry-api/CHANGELOG.md
index 2defb0a59a0..1f9ba4707f1 100644
--- a/opentelemetry-api/CHANGELOG.md
+++ b/opentelemetry-api/CHANGELOG.md
@@ -2,6 +2,22 @@
## Unreleased
+- Refactor `SpanContext.is_valid` from a method to a data attribute
+ ([#1005](https://github.com/open-telemetry/opentelemetry-python/pull/1005))
+- Moved samplers from API to SDK
+ ([#1023](https://github.com/open-telemetry/opentelemetry-python/pull/1023))
+- Change return value type of `correlationcontext.get_correlations` to immutable `MappingProxyType`
+ ([#1024](https://github.com/open-telemetry/opentelemetry-python/pull/1024))
+- Change is_recording_events to is_recording
+ ([#1034](https://github.com/open-telemetry/opentelemetry-python/pull/1034))
+- Remove lazy Event and Link API from Span interface
+ ([#1045](https://github.com/open-telemetry/opentelemetry-python/pull/1045))
+- Rename CorrelationContext to Baggage
+ ([#1060](https://github.com/open-telemetry/opentelemetry-python/pull/1060))
+- Rename HTTPTextFormat to TextMapPropagator. This change also updates `get_global_httptextformat` and
+ `set_global_httptextformat` to `get_global_textmap` and `set_global_textmap`
+ ([#1085](https://github.com/open-telemetry/opentelemetry-python/pull/1085))
+
## Version 0.12b0
Released 2020-08-14
diff --git a/opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/__init__.py
similarity index 62%
rename from opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py
rename to opentelemetry-api/src/opentelemetry/baggage/__init__.py
index c16d75162ad..cdb2196f741 100644
--- a/opentelemetry-api/src/opentelemetry/correlationcontext/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/baggage/__init__.py
@@ -13,35 +13,36 @@
# limitations under the License.
import typing
+from types import MappingProxyType
from opentelemetry.context import get_value, set_value
from opentelemetry.context.context import Context
-_CORRELATION_CONTEXT_KEY = "correlation-context"
+_BAGGAGE_KEY = "baggage"
-def get_correlations(
+def get_all(
context: typing.Optional[Context] = None,
-) -> typing.Dict[str, object]:
- """Returns the name/value pairs in the CorrelationContext
+) -> typing.Mapping[str, object]:
+ """Returns the name/value pairs in the Baggage
Args:
context: The Context to use. If not set, uses current Context
Returns:
- Name/value pairs in the CorrelationContext
+ The name/value pairs in the Baggage
"""
- correlations = get_value(_CORRELATION_CONTEXT_KEY, context=context)
- if isinstance(correlations, dict):
- return correlations.copy()
- return {}
+ baggage = get_value(_BAGGAGE_KEY, context=context)
+ if isinstance(baggage, dict):
+ return MappingProxyType(baggage.copy())
+ return MappingProxyType({})
-def get_correlation(
+def get_baggage(
name: str, context: typing.Optional[Context] = None
) -> typing.Optional[object]:
"""Provides access to the value for a name/value pair in the
- CorrelationContext
+ Baggage
Args:
name: The name of the value to retrieve
@@ -51,13 +52,13 @@ def get_correlation(
The value associated with the given name, or null if the given name is
not present.
"""
- return get_correlations(context=context).get(name)
+ return get_all(context=context).get(name)
-def set_correlation(
+def set_baggage(
name: str, value: object, context: typing.Optional[Context] = None
) -> Context:
- """Sets a value in the CorrelationContext
+ """Sets a value in the Baggage
Args:
name: The name of the value to set
@@ -67,15 +68,15 @@ def set_correlation(
Returns:
A Context with the value updated
"""
- correlations = get_correlations(context=context)
- correlations[name] = value
- return set_value(_CORRELATION_CONTEXT_KEY, correlations, context=context)
+ baggage = dict(get_all(context=context))
+ baggage[name] = value
+ return set_value(_BAGGAGE_KEY, baggage, context=context)
-def remove_correlation(
+def remove_baggage(
name: str, context: typing.Optional[Context] = None
) -> Context:
- """Removes a value from the CorrelationContext
+ """Removes a value from the Baggage
Args:
name: The name of the value to remove
@@ -84,19 +85,19 @@ def remove_correlation(
Returns:
A Context with the name/value removed
"""
- correlations = get_correlations(context=context)
- correlations.pop(name, None)
+ baggage = dict(get_all(context=context))
+ baggage.pop(name, None)
- return set_value(_CORRELATION_CONTEXT_KEY, correlations, context=context)
+ return set_value(_BAGGAGE_KEY, baggage, context=context)
-def clear_correlations(context: typing.Optional[Context] = None) -> Context:
- """Removes all values from the CorrelationContext
+def clear(context: typing.Optional[Context] = None) -> Context:
+ """Removes all values from the Baggage
Args:
context: The Context to use. If not set, uses current Context
Returns:
- A Context with all correlations removed
+ A Context with all baggage entries removed
"""
- return set_value(_CORRELATION_CONTEXT_KEY, {}, context=context)
+ return set_value(_BAGGAGE_KEY, {}, context=context)
diff --git a/opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
similarity index 51%
rename from opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py
rename to opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
index fca9465fbb3..fb14ab95672 100644
--- a/opentelemetry-api/src/opentelemetry/correlationcontext/propagation/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
@@ -15,55 +15,53 @@
import typing
import urllib.parse
-from opentelemetry import correlationcontext
+from opentelemetry import baggage
from opentelemetry.context import get_current
from opentelemetry.context.context import Context
-from opentelemetry.trace.propagation import httptextformat
+from opentelemetry.trace.propagation import textmap
-class CorrelationContextPropagator(httptextformat.HTTPTextFormat):
+class BaggagePropagator(textmap.TextMapPropagator):
MAX_HEADER_LENGTH = 8192
MAX_PAIR_LENGTH = 4096
MAX_PAIRS = 180
- _CORRELATION_CONTEXT_HEADER_NAME = "otcorrelationcontext"
+ _BAGGAGE_HEADER_NAME = "otcorrelations"
def extract(
self,
- get_from_carrier: httptextformat.Getter[
- httptextformat.HTTPTextFormatT
- ],
- carrier: httptextformat.HTTPTextFormatT,
+ get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT],
+ carrier: textmap.TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> Context:
- """Extract CorrelationContext from the carrier.
+ """Extract Baggage from the carrier.
See
- `opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.extract`
+ `opentelemetry.trace.propagation.textmap.TextMapPropagator.extract`
"""
if context is None:
context = get_current()
header = _extract_first_element(
- get_from_carrier(carrier, self._CORRELATION_CONTEXT_HEADER_NAME)
+ get_from_carrier(carrier, self._BAGGAGE_HEADER_NAME)
)
if not header or len(header) > self.MAX_HEADER_LENGTH:
return context
- correlations = header.split(",")
- total_correlations = self.MAX_PAIRS
- for correlation in correlations:
- if total_correlations <= 0:
+ baggage_entries = header.split(",")
+ total_baggage_entries = self.MAX_PAIRS
+ for entry in baggage_entries:
+ if total_baggage_entries <= 0:
return context
- total_correlations -= 1
- if len(correlation) > self.MAX_PAIR_LENGTH:
+ total_baggage_entries -= 1
+ if len(entry) > self.MAX_PAIR_LENGTH:
continue
try:
- name, value = correlation.split("=", 1)
+ name, value = entry.split("=", 1)
except Exception: # pylint: disable=broad-except
continue
- context = correlationcontext.set_correlation(
+ context = baggage.set_baggage(
urllib.parse.unquote(name).strip(),
urllib.parse.unquote(value).strip(),
context=context,
@@ -73,37 +71,35 @@ def extract(
def inject(
self,
- set_in_carrier: httptextformat.Setter[httptextformat.HTTPTextFormatT],
- carrier: httptextformat.HTTPTextFormatT,
+ set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],
+ carrier: textmap.TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> None:
- """Injects CorrelationContext into the carrier.
+ """Injects Baggage into the carrier.
See
- `opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.inject`
+ `opentelemetry.trace.propagation.textmap.TextMapPropagator.inject`
"""
- correlations = correlationcontext.get_correlations(context=context)
- if not correlations:
+ baggage_entries = baggage.get_all(context=context)
+ if not baggage_entries:
return
- correlation_context_string = _format_correlations(correlations)
+ baggage_string = _format_baggage(baggage_entries)
set_in_carrier(
- carrier,
- self._CORRELATION_CONTEXT_HEADER_NAME,
- correlation_context_string,
+ carrier, self._BAGGAGE_HEADER_NAME, baggage_string,
)
-def _format_correlations(correlations: typing.Dict[str, object]) -> str:
+def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:
return ",".join(
key + "=" + urllib.parse.quote_plus(str(value))
- for key, value in correlations.items()
+ for key, value in baggage_entries.items()
)
def _extract_first_element(
- items: typing.Iterable[httptextformat.HTTPTextFormatT],
-) -> typing.Optional[httptextformat.HTTPTextFormatT]:
+ items: typing.Iterable[textmap.TextMapPropagatorT],
+) -> typing.Optional[textmap.TextMapPropagatorT]:
if items is None:
return None
return next(iter(items), None)
diff --git a/opentelemetry-api/src/opentelemetry/propagators/__init__.py b/opentelemetry-api/src/opentelemetry/propagators/__init__.py
index 5aa53e25dce..f34e3c588b4 100644
--- a/opentelemetry-api/src/opentelemetry/propagators/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/propagators/__init__.py
@@ -22,7 +22,7 @@
from opentelemetry import propagators
- PROPAGATOR = propagators.get_global_httptextformat()
+ PROPAGATOR = propagators.get_global_textmap()
def get_header_from_flask_request(request, key):
@@ -55,22 +55,18 @@ def example_route():
import typing
-import opentelemetry.trace as trace
-from opentelemetry.context import get_current
+from opentelemetry.baggage.propagation import BaggagePropagator
from opentelemetry.context.context import Context
-from opentelemetry.correlationcontext.propagation import (
- CorrelationContextPropagator,
-)
from opentelemetry.propagators import composite
-from opentelemetry.trace.propagation import httptextformat
-from opentelemetry.trace.propagation.tracecontexthttptextformat import (
- TraceContextHTTPTextFormat,
+from opentelemetry.trace.propagation import textmap
+from opentelemetry.trace.propagation.tracecontext import (
+ TraceContextTextMapPropagator,
)
def extract(
- get_from_carrier: httptextformat.Getter[httptextformat.HTTPTextFormatT],
- carrier: httptextformat.HTTPTextFormatT,
+ get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT],
+ carrier: textmap.TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> Context:
""" Uses the configured propagator to extract a Context from the carrier.
@@ -86,14 +82,12 @@ def extract(
context: an optional Context to use. Defaults to current
context if not set.
"""
- return get_global_httptextformat().extract(
- get_from_carrier, carrier, context
- )
+ return get_global_textmap().extract(get_from_carrier, carrier, context)
def inject(
- set_in_carrier: httptextformat.Setter[httptextformat.HTTPTextFormatT],
- carrier: httptextformat.HTTPTextFormatT,
+ set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],
+ carrier: textmap.TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> None:
""" Uses the configured propagator to inject a Context into the carrier.
@@ -107,20 +101,18 @@ def inject(
context: an optional Context to use. Defaults to current
context if not set.
"""
- get_global_httptextformat().inject(set_in_carrier, carrier, context)
+ get_global_textmap().inject(set_in_carrier, carrier, context)
_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(
- [TraceContextHTTPTextFormat(), CorrelationContextPropagator()],
-) # type: httptextformat.HTTPTextFormat
+ [TraceContextTextMapPropagator(), BaggagePropagator()],
+) # type: textmap.TextMapPropagator
-def get_global_httptextformat() -> httptextformat.HTTPTextFormat:
+def get_global_textmap() -> textmap.TextMapPropagator:
return _HTTP_TEXT_FORMAT
-def set_global_httptextformat(
- http_text_format: httptextformat.HTTPTextFormat,
-) -> None:
+def set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None:
global _HTTP_TEXT_FORMAT # pylint:disable=global-statement
_HTTP_TEXT_FORMAT = http_text_format
diff --git a/opentelemetry-api/src/opentelemetry/propagators/composite.py b/opentelemetry-api/src/opentelemetry/propagators/composite.py
index 50fba01423b..3499d2ea08a 100644
--- a/opentelemetry-api/src/opentelemetry/propagators/composite.py
+++ b/opentelemetry-api/src/opentelemetry/propagators/composite.py
@@ -15,12 +15,12 @@
import typing
from opentelemetry.context.context import Context
-from opentelemetry.trace.propagation import httptextformat
+from opentelemetry.trace.propagation import textmap
logger = logging.getLogger(__name__)
-class CompositeHTTPPropagator(httptextformat.HTTPTextFormat):
+class CompositeHTTPPropagator(textmap.TextMapPropagator):
""" CompositeHTTPPropagator provides a mechanism for combining multiple
propagators into a single one.
@@ -29,16 +29,14 @@ class CompositeHTTPPropagator(httptextformat.HTTPTextFormat):
"""
def __init__(
- self, propagators: typing.Sequence[httptextformat.HTTPTextFormat]
+ self, propagators: typing.Sequence[textmap.TextMapPropagator]
) -> None:
self._propagators = propagators
def extract(
self,
- get_from_carrier: httptextformat.Getter[
- httptextformat.HTTPTextFormatT
- ],
- carrier: httptextformat.HTTPTextFormatT,
+ get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT],
+ carrier: textmap.TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> Context:
""" Run each of the configured propagators with the given context and carrier.
@@ -46,7 +44,7 @@ def extract(
propagators write the same context key, the propagator later in the list
will override previous propagators.
- See `opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.extract`
+ See `opentelemetry.trace.propagation.textmap.TextMapPropagator.extract`
"""
for propagator in self._propagators:
context = propagator.extract(get_from_carrier, carrier, context)
@@ -54,8 +52,8 @@ def extract(
def inject(
self,
- set_in_carrier: httptextformat.Setter[httptextformat.HTTPTextFormatT],
- carrier: httptextformat.HTTPTextFormatT,
+ set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],
+ carrier: textmap.TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> None:
""" Run each of the configured propagators with the given context and carrier.
@@ -63,7 +61,7 @@ def inject(
propagators write the same carrier key, the propagator later in the list
will override previous propagators.
- See `opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.inject`
+ See `opentelemetry.trace.propagation.textmap.TextMapPropagator.inject`
"""
for propagator in self._propagators:
propagator.inject(set_in_carrier, carrier, context)
diff --git a/opentelemetry-api/src/opentelemetry/trace/__init__.py b/opentelemetry-api/src/opentelemetry/trace/__init__.py
index 6c1bf46cc92..1795192254f 100644
--- a/opentelemetry-api/src/opentelemetry/trace/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/trace/__init__.py
@@ -138,28 +138,6 @@ def attributes(self) -> types.Attributes:
return self._attributes
-class LazyLink(LinkBase):
- """A lazy link to a `Span`.
-
- Args:
- context: `SpanContext` of the `Span` to link to.
- link_formatter: Callable object that returns the attributes of the
- Link.
- """
-
- def __init__(
- self,
- context: "SpanContext",
- link_formatter: types.AttributesFormatter,
- ) -> None:
- super().__init__(context)
- self._link_formatter = link_formatter
-
- @property
- def attributes(self) -> types.Attributes:
- return self._link_formatter()
-
-
class SpanKind(enum.Enum):
"""Specifies additional details on how this span relates to its parent span.
@@ -251,7 +229,7 @@ def start_span(
name: str,
parent: ParentSpan = CURRENT_SPAN,
kind: SpanKind = SpanKind.INTERNAL,
- attributes: typing.Optional[types.Attributes] = None,
+ attributes: types.Attributes = None,
links: typing.Sequence[Link] = (),
start_time: typing.Optional[int] = None,
set_status_on_exception: bool = True,
@@ -303,7 +281,7 @@ def start_as_current_span(
name: str,
parent: ParentSpan = CURRENT_SPAN,
kind: SpanKind = SpanKind.INTERNAL,
- attributes: typing.Optional[types.Attributes] = None,
+ attributes: types.Attributes = None,
links: typing.Sequence[Link] = (),
) -> typing.Iterator["Span"]:
"""Context manager for creating a new span and set it
@@ -379,7 +357,7 @@ def start_span(
name: str,
parent: ParentSpan = Tracer.CURRENT_SPAN,
kind: SpanKind = SpanKind.INTERNAL,
- attributes: typing.Optional[types.Attributes] = None,
+ attributes: types.Attributes = None,
links: typing.Sequence[Link] = (),
start_time: typing.Optional[int] = None,
set_status_on_exception: bool = True,
@@ -393,7 +371,7 @@ def start_as_current_span(
name: str,
parent: ParentSpan = Tracer.CURRENT_SPAN,
kind: SpanKind = SpanKind.INTERNAL,
- attributes: typing.Optional[types.Attributes] = None,
+ attributes: types.Attributes = None,
links: typing.Sequence[Link] = (),
) -> typing.Iterator["Span"]:
# pylint: disable=unused-argument,no-self-use
@@ -464,7 +442,6 @@ def get_tracer_provider() -> TracerProvider:
"DefaultSpan",
"DefaultTracer",
"DefaultTracerProvider",
- "LazyLink",
"Link",
"LinkBase",
"ParentSpan",
diff --git a/opentelemetry-api/src/opentelemetry/trace/propagation/httptextformat.py b/opentelemetry-api/src/opentelemetry/trace/propagation/textmap.py
similarity index 85%
rename from opentelemetry-api/src/opentelemetry/trace/propagation/httptextformat.py
rename to opentelemetry-api/src/opentelemetry/trace/propagation/textmap.py
index e15e2a0e6d6..6f9ed897e11 100644
--- a/opentelemetry-api/src/opentelemetry/trace/propagation/httptextformat.py
+++ b/opentelemetry-api/src/opentelemetry/trace/propagation/textmap.py
@@ -17,16 +17,16 @@
from opentelemetry.context.context import Context
-HTTPTextFormatT = typing.TypeVar("HTTPTextFormatT")
+TextMapPropagatorT = typing.TypeVar("TextMapPropagatorT")
-Setter = typing.Callable[[HTTPTextFormatT, str, str], None]
-Getter = typing.Callable[[HTTPTextFormatT, str], typing.List[str]]
+Setter = typing.Callable[[TextMapPropagatorT, str, str], None]
+Getter = typing.Callable[[TextMapPropagatorT, str], typing.List[str]]
-class HTTPTextFormat(abc.ABC):
+class TextMapPropagator(abc.ABC):
"""This class provides an interface that enables extracting and injecting
context into headers of HTTP requests. HTTP frameworks and clients
- can integrate with HTTPTextFormat by providing the object containing the
+ can integrate with TextMapPropagator by providing the object containing the
headers, and a getter and setter function for the extraction and
injection of values, respectively.
@@ -35,8 +35,8 @@ class HTTPTextFormat(abc.ABC):
@abc.abstractmethod
def extract(
self,
- get_from_carrier: Getter[HTTPTextFormatT],
- carrier: HTTPTextFormatT,
+ get_from_carrier: Getter[TextMapPropagatorT],
+ carrier: TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> Context:
"""Create a Context from values in the carrier.
@@ -63,8 +63,8 @@ def extract(
@abc.abstractmethod
def inject(
self,
- set_in_carrier: Setter[HTTPTextFormatT],
- carrier: HTTPTextFormatT,
+ set_in_carrier: Setter[TextMapPropagatorT],
+ carrier: TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> None:
"""Inject values from a Context into a carrier.
diff --git a/opentelemetry-api/src/opentelemetry/trace/propagation/tracecontexthttptextformat.py b/opentelemetry-api/src/opentelemetry/trace/propagation/tracecontext.py
similarity index 91%
rename from opentelemetry-api/src/opentelemetry/trace/propagation/tracecontexthttptextformat.py
rename to opentelemetry-api/src/opentelemetry/trace/propagation/tracecontext.py
index 1cfd0704e23..8627b9a65cb 100644
--- a/opentelemetry-api/src/opentelemetry/trace/propagation/tracecontexthttptextformat.py
+++ b/opentelemetry-api/src/opentelemetry/trace/propagation/tracecontext.py
@@ -17,7 +17,7 @@
import opentelemetry.trace as trace
from opentelemetry.context.context import Context
-from opentelemetry.trace.propagation import httptextformat
+from opentelemetry.trace.propagation import textmap
# Keys and values are strings of up to 256 printable US-ASCII characters.
# Implementations should conform to the `W3C Trace Context - Tracestate`_
@@ -46,7 +46,7 @@
_TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS = 32
-class TraceContextHTTPTextFormat(httptextformat.HTTPTextFormat):
+class TraceContextTextMapPropagator(textmap.TextMapPropagator):
"""Extracts and injects using w3c TraceContext's headers.
"""
@@ -60,15 +60,13 @@ class TraceContextHTTPTextFormat(httptextformat.HTTPTextFormat):
def extract(
self,
- get_from_carrier: httptextformat.Getter[
- httptextformat.HTTPTextFormatT
- ],
- carrier: httptextformat.HTTPTextFormatT,
+ get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT],
+ carrier: textmap.TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> Context:
"""Extracts SpanContext from the carrier.
- See `opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.extract`
+ See `opentelemetry.trace.propagation.textmap.TextMapPropagator.extract`
"""
header = get_from_carrier(carrier, self._TRACEPARENT_HEADER_NAME)
@@ -111,13 +109,13 @@ def extract(
def inject(
self,
- set_in_carrier: httptextformat.Setter[httptextformat.HTTPTextFormatT],
- carrier: httptextformat.HTTPTextFormatT,
+ set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],
+ carrier: textmap.TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> None:
"""Injects SpanContext into the carrier.
- See `opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.inject`
+ See `opentelemetry.trace.propagation.textmap.TextMapPropagator.inject`
"""
span = trace.get_current_span(context)
span_context = span.get_context()
diff --git a/opentelemetry-api/src/opentelemetry/trace/sampling.py b/opentelemetry-api/src/opentelemetry/trace/sampling.py
deleted file mode 100644
index 868678b05c0..00000000000
--- a/opentelemetry-api/src/opentelemetry/trace/sampling.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-For general information about sampling, see `the specification `_.
-
-OpenTelemetry provides two types of samplers:
-
-- `StaticSampler`
-- `ProbabilitySampler`
-
-A `StaticSampler` always returns the same sampling decision regardless of the conditions. Both possible StaticSamplers are already created:
-
-- Always sample spans: `ALWAYS_ON`
-- Never sample spans: `ALWAYS_OFF`
-
-A `ProbabilitySampler` makes a random sampling decision based on the sampling probability given. If the span being sampled has a parent, `ProbabilitySampler` will respect the parent span's sampling decision.
-
-Currently, sampling decisions are always made during the creation of the span. However, this might not always be the case in the future (see `OTEP #115 `_).
-
-Custom samplers can be created by subclassing `Sampler` and implementing `Sampler.should_sample`.
-
-To use a sampler, pass it into the tracer provider constructor. For example:
-
-.. code:: python
-
- from opentelemetry import trace
- from opentelemetry.trace.sampling import ProbabilitySampler
- from opentelemetry.sdk.trace import TracerProvider
- from opentelemetry.sdk.trace.export import (
- ConsoleSpanExporter,
- SimpleExportSpanProcessor,
- )
-
- # sample 1 in every 1000 traces
- sampler = ProbabilitySampler(1/1000)
-
- # set the sampler onto the global tracer provider
- trace.set_tracer_provider(TracerProvider(sampler=sampler))
-
- # set up an exporter for sampled spans
- trace.get_tracer_provider().add_span_processor(
- SimpleExportSpanProcessor(ConsoleSpanExporter())
- )
-
- # created spans will now be sampled by the ProbabilitySampler
- with trace.get_tracer(__name__).start_as_current_span("Test Span"):
- ...
-"""
-import abc
-from typing import Dict, Mapping, Optional, Sequence
-
-# pylint: disable=unused-import
-from opentelemetry.trace import Link, SpanContext
-from opentelemetry.util.types import Attributes, AttributeValue
-
-
-class Decision:
- """A sampling decision as applied to a newly-created Span.
-
- Args:
- sampled: Whether the `opentelemetry.trace.Span` should be sampled.
- attributes: Attributes to add to the `opentelemetry.trace.Span`.
- """
-
- def __repr__(self) -> str:
- return "{}({}, attributes={})".format(
- type(self).__name__, str(self.sampled), str(self.attributes)
- )
-
- def __init__(
- self,
- sampled: bool = False,
- attributes: Optional[Mapping[str, "AttributeValue"]] = None,
- ) -> None:
- self.sampled = sampled # type: bool
- if attributes is None:
- self.attributes = {} # type: Dict[str, "AttributeValue"]
- else:
- self.attributes = dict(attributes)
-
-
-class Sampler(abc.ABC):
- @abc.abstractmethod
- def should_sample(
- self,
- parent_context: Optional["SpanContext"],
- trace_id: int,
- span_id: int,
- name: str,
- attributes: Optional[Attributes] = None,
- links: Sequence["Link"] = (),
- ) -> "Decision":
- pass
-
-
-class StaticSampler(Sampler):
- """Sampler that always returns the same decision."""
-
- def __init__(self, decision: "Decision"):
- self._decision = decision
-
- def should_sample(
- self,
- parent_context: Optional["SpanContext"],
- trace_id: int,
- span_id: int,
- name: str,
- attributes: Optional[Attributes] = None,
- links: Sequence["Link"] = (),
- ) -> "Decision":
- return self._decision
-
-
-class ProbabilitySampler(Sampler):
- """
- Sampler that makes sampling decisions probabalistically based on `rate`,
- while also respecting the parent span sampling decision.
-
- Args:
- rate: Probability (between 0 and 1) that a span will be sampled
- """
-
- def __init__(self, rate: float):
- self._rate = rate
- self._bound = self.get_bound_for_rate(self._rate)
-
- # For compatibility with 64 bit trace IDs, the sampler checks the 64
- # low-order bits of the trace ID to decide whether to sample a given trace.
- TRACE_ID_LIMIT = (1 << 64) - 1
-
- @classmethod
- def get_bound_for_rate(cls, rate: float) -> int:
- return round(rate * (cls.TRACE_ID_LIMIT + 1))
-
- @property
- def rate(self) -> float:
- return self._rate
-
- @rate.setter
- def rate(self, new_rate: float) -> None:
- self._rate = new_rate
- self._bound = self.get_bound_for_rate(self._rate)
-
- @property
- def bound(self) -> int:
- return self._bound
-
- def should_sample(
- self,
- parent_context: Optional["SpanContext"],
- trace_id: int,
- span_id: int,
- name: str,
- attributes: Optional[Attributes] = None, # TODO
- links: Sequence["Link"] = (),
- ) -> "Decision":
- if parent_context is not None:
- return Decision(parent_context.trace_flags.sampled)
-
- return Decision(trace_id & self.TRACE_ID_LIMIT < self.bound)
-
-
-ALWAYS_OFF = StaticSampler(Decision(False))
-"""Sampler that never samples spans, regardless of the parent span's sampling decision."""
-
-ALWAYS_ON = StaticSampler(Decision(True))
-"""Sampler that always samples spans, regardless of the parent span's sampling decision."""
-
-
-DEFAULT_OFF = ProbabilitySampler(0.0)
-"""Sampler that respects its parent span's sampling decision, but otherwise never samples."""
-
-DEFAULT_ON = ProbabilitySampler(1.0)
-"""Sampler that respects its parent span's sampling decision, but otherwise always samples."""
diff --git a/opentelemetry-api/src/opentelemetry/trace/span.py b/opentelemetry-api/src/opentelemetry/trace/span.py
index d207ecf565b..27bbc223368 100644
--- a/opentelemetry-api/src/opentelemetry/trace/span.py
+++ b/opentelemetry-api/src/opentelemetry/trace/span.py
@@ -51,19 +51,6 @@ def add_event(
timestamp if the `timestamp` argument is omitted.
"""
- @abc.abstractmethod
- def add_lazy_event(
- self,
- name: str,
- event_formatter: types.AttributesFormatter,
- timestamp: typing.Optional[int] = None,
- ) -> None:
- """Adds an `Event`.
- Adds a single `Event` with the name, an event formatter that calculates
- the attributes lazily and, optionally, a timestamp. Implementations
- should generate a timestamp if the `timestamp` argument is omitted.
- """
-
@abc.abstractmethod
def update_name(self, name: str) -> None:
"""Updates the `Span` name.
@@ -75,7 +62,7 @@ def update_name(self, name: str) -> None:
"""
@abc.abstractmethod
- def is_recording_events(self) -> bool:
+ def is_recording(self) -> bool:
"""Returns whether this span will be recorded.
Returns true if this Span is active and recording information like
@@ -187,6 +174,10 @@ def __init__(
self.trace_flags = trace_flags
self.trace_state = trace_state
self.is_remote = is_remote
+ self.is_valid = (
+ self.trace_id != INVALID_TRACE_ID
+ and self.span_id != INVALID_SPAN_ID
+ )
def __repr__(self) -> str:
return (
@@ -199,20 +190,6 @@ def __repr__(self) -> str:
self.is_remote,
)
- def is_valid(self) -> bool:
- """Get whether this `SpanContext` is valid.
-
- A `SpanContext` is said to be invalid if its trace ID or span ID is
- invalid (i.e. ``0``).
-
- Returns:
- True if the `SpanContext` is valid, false otherwise.
- """
- return (
- self.trace_id != INVALID_TRACE_ID
- and self.span_id != INVALID_SPAN_ID
- )
-
class DefaultSpan(Span):
"""The default Span that is used when no Span implementation is available.
@@ -226,7 +203,7 @@ def __init__(self, context: "SpanContext") -> None:
def get_context(self) -> "SpanContext":
return self._context
- def is_recording_events(self) -> bool:
+ def is_recording(self) -> bool:
return False
def end(self, end_time: typing.Optional[int] = None) -> None:
@@ -243,14 +220,6 @@ def add_event(
) -> None:
pass
- def add_lazy_event(
- self,
- name: str,
- event_formatter: types.AttributesFormatter,
- timestamp: typing.Optional[int] = None,
- ) -> None:
- pass
-
def update_name(self, name: str) -> None:
pass
diff --git a/opentelemetry-api/tests/baggage/test_baggage.py b/opentelemetry-api/tests/baggage/test_baggage.py
new file mode 100644
index 00000000000..276d2bc8b0a
--- /dev/null
+++ b/opentelemetry-api/tests/baggage/test_baggage.py
@@ -0,0 +1,69 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+from opentelemetry import baggage, context
+
+
+class TestBaggageManager(unittest.TestCase):
+ def test_set_baggage(self):
+ self.assertEqual({}, baggage.get_all())
+
+ ctx = baggage.set_baggage("test", "value")
+ self.assertEqual(baggage.get_baggage("test", context=ctx), "value")
+
+ ctx = baggage.set_baggage("test", "value2", context=ctx)
+ self.assertEqual(baggage.get_baggage("test", context=ctx), "value2")
+
+ def test_baggages_current_context(self):
+ token = context.attach(baggage.set_baggage("test", "value"))
+ self.assertEqual(baggage.get_baggage("test"), "value")
+ context.detach(token)
+ self.assertEqual(baggage.get_baggage("test"), None)
+
+ def test_set_multiple_baggage_entries(self):
+ ctx = baggage.set_baggage("test", "value")
+ ctx = baggage.set_baggage("test2", "value2", context=ctx)
+ self.assertEqual(baggage.get_baggage("test", context=ctx), "value")
+ self.assertEqual(baggage.get_baggage("test2", context=ctx), "value2")
+ self.assertEqual(
+ baggage.get_all(context=ctx), {"test": "value", "test2": "value2"},
+ )
+
+ def test_modifying_baggage(self):
+ ctx = baggage.set_baggage("test", "value")
+ self.assertEqual(baggage.get_baggage("test", context=ctx), "value")
+ baggage_entries = baggage.get_all(context=ctx)
+ with self.assertRaises(TypeError):
+ baggage_entries["test"] = "mess-this-up"
+ self.assertEqual(baggage.get_baggage("test", context=ctx), "value")
+
+ def test_remove_baggage_entry(self):
+ self.assertEqual({}, baggage.get_all())
+
+ ctx = baggage.set_baggage("test", "value")
+ ctx = baggage.set_baggage("test2", "value2", context=ctx)
+ ctx = baggage.remove_baggage("test", context=ctx)
+ self.assertEqual(baggage.get_baggage("test", context=ctx), None)
+ self.assertEqual(baggage.get_baggage("test2", context=ctx), "value2")
+
+ def test_clear_baggage(self):
+ self.assertEqual({}, baggage.get_all())
+
+ ctx = baggage.set_baggage("test", "value")
+ self.assertEqual(baggage.get_baggage("test", context=ctx), "value")
+
+ ctx = baggage.clear(context=ctx)
+ self.assertEqual(baggage.get_all(context=ctx), {})
diff --git a/opentelemetry-api/tests/correlationcontext/test_correlation_context_propagation.py b/opentelemetry-api/tests/baggage/test_baggage_propagation.py
similarity index 80%
rename from opentelemetry-api/tests/correlationcontext/test_correlation_context_propagation.py
rename to opentelemetry-api/tests/baggage/test_baggage_propagation.py
index c33326b173c..e8bd45d0656 100644
--- a/opentelemetry-api/tests/correlationcontext/test_correlation_context_propagation.py
+++ b/opentelemetry-api/tests/baggage/test_baggage_propagation.py
@@ -15,11 +15,9 @@
import typing
import unittest
-from opentelemetry import correlationcontext
+from opentelemetry import baggage
+from opentelemetry.baggage.propagation import BaggagePropagator
from opentelemetry.context import get_current
-from opentelemetry.correlationcontext.propagation import (
- CorrelationContextPropagator,
-)
def get_as_list(
@@ -28,31 +26,29 @@ def get_as_list(
return dict_object.get(key, [])
-class TestCorrelationContextPropagation(unittest.TestCase):
+class TestBaggagePropagation(unittest.TestCase):
def setUp(self):
- self.propagator = CorrelationContextPropagator()
+ self.propagator = BaggagePropagator()
def _extract(self, header_value):
"""Test helper"""
- header = {"otcorrelationcontext": [header_value]}
- return correlationcontext.get_correlations(
- self.propagator.extract(get_as_list, header)
- )
+ header = {"otcorrelations": [header_value]}
+ return baggage.get_all(self.propagator.extract(get_as_list, header))
def _inject(self, values):
"""Test helper"""
ctx = get_current()
for k, v in values.items():
- ctx = correlationcontext.set_correlation(k, v, context=ctx)
+ ctx = baggage.set_baggage(k, v, context=ctx)
output = {}
self.propagator.inject(dict.__setitem__, output, context=ctx)
- return output.get("otcorrelationcontext")
+ return output.get("otcorrelations")
def test_no_context_header(self):
- correlations = correlationcontext.get_correlations(
+ baggage_entries = baggage.get_all(
self.propagator.extract(get_as_list, {})
)
- self.assertEqual(correlations, {})
+ self.assertEqual(baggage_entries, {})
def test_empty_context_header(self):
header = ""
@@ -94,7 +90,7 @@ def test_invalid_header(self):
self.assertEqual(self._extract(header), expected)
def test_header_too_long(self):
- long_value = "s" * (CorrelationContextPropagator.MAX_HEADER_LENGTH + 1)
+ long_value = "s" * (BaggagePropagator.MAX_HEADER_LENGTH + 1)
header = "key1={}".format(long_value)
expected = {}
self.assertEqual(self._extract(header), expected)
@@ -103,20 +99,20 @@ def test_header_contains_too_many_entries(self):
header = ",".join(
[
"key{}=val".format(k)
- for k in range(CorrelationContextPropagator.MAX_PAIRS + 1)
+ for k in range(BaggagePropagator.MAX_PAIRS + 1)
]
)
self.assertEqual(
- len(self._extract(header)), CorrelationContextPropagator.MAX_PAIRS
+ len(self._extract(header)), BaggagePropagator.MAX_PAIRS
)
def test_header_contains_pair_too_long(self):
- long_value = "s" * (CorrelationContextPropagator.MAX_PAIR_LENGTH + 1)
+ long_value = "s" * (BaggagePropagator.MAX_PAIR_LENGTH + 1)
header = "key1=value1,key2={},key3=value3".format(long_value)
expected = {"key1": "value1", "key3": "value3"}
self.assertEqual(self._extract(header), expected)
- def test_inject_no_correlations(self):
+ def test_inject_no_baggage_entries(self):
values = {}
output = self._inject(values)
self.assertEqual(None, output)
diff --git a/opentelemetry-api/tests/correlationcontext/test_correlation_context.py b/opentelemetry-api/tests/correlationcontext/test_correlation_context.py
deleted file mode 100644
index 31996c6913a..00000000000
--- a/opentelemetry-api/tests/correlationcontext/test_correlation_context.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-from opentelemetry import context
-from opentelemetry import correlationcontext as cctx
-
-
-class TestCorrelationContextManager(unittest.TestCase):
- def test_set_correlation(self):
- self.assertEqual({}, cctx.get_correlations())
-
- ctx = cctx.set_correlation("test", "value")
- self.assertEqual(cctx.get_correlation("test", context=ctx), "value")
-
- ctx = cctx.set_correlation("test", "value2", context=ctx)
- self.assertEqual(cctx.get_correlation("test", context=ctx), "value2")
-
- def test_correlations_current_context(self):
- token = context.attach(cctx.set_correlation("test", "value"))
- self.assertEqual(cctx.get_correlation("test"), "value")
- context.detach(token)
- self.assertEqual(cctx.get_correlation("test"), None)
-
- def test_set_multiple_correlations(self):
- ctx = cctx.set_correlation("test", "value")
- ctx = cctx.set_correlation("test2", "value2", context=ctx)
- self.assertEqual(cctx.get_correlation("test", context=ctx), "value")
- self.assertEqual(cctx.get_correlation("test2", context=ctx), "value2")
- self.assertEqual(
- cctx.get_correlations(context=ctx),
- {"test": "value", "test2": "value2"},
- )
-
- def test_modifying_correlations(self):
- ctx = cctx.set_correlation("test", "value")
- self.assertEqual(cctx.get_correlation("test", context=ctx), "value")
- correlations = cctx.get_correlations(context=ctx)
- correlations["test"] = "mess-this-up"
- self.assertEqual(cctx.get_correlation("test", context=ctx), "value")
-
- def test_remove_correlations(self):
- self.assertEqual({}, cctx.get_correlations())
-
- ctx = cctx.set_correlation("test", "value")
- ctx = cctx.set_correlation("test2", "value2", context=ctx)
- ctx = cctx.remove_correlation("test", context=ctx)
- self.assertEqual(cctx.get_correlation("test", context=ctx), None)
- self.assertEqual(cctx.get_correlation("test2", context=ctx), "value2")
-
- def test_clear_correlations(self):
- self.assertEqual({}, cctx.get_correlations())
-
- ctx = cctx.set_correlation("test", "value")
- self.assertEqual(cctx.get_correlation("test", context=ctx), "value")
-
- ctx = cctx.clear_correlations(context=ctx)
- self.assertEqual(cctx.get_correlations(context=ctx), {})
diff --git a/opentelemetry-api/tests/propagators/test_global_httptextformat.py b/opentelemetry-api/tests/propagators/test_global_httptextformat.py
index a7e94302233..a1c58a4c3ad 100644
--- a/opentelemetry-api/tests/propagators/test_global_httptextformat.py
+++ b/opentelemetry-api/tests/propagators/test_global_httptextformat.py
@@ -15,7 +15,7 @@
import typing
import unittest
-from opentelemetry import correlationcontext, trace
+from opentelemetry import baggage, trace
from opentelemetry.propagators import extract, inject
from opentelemetry.trace import get_current_span, set_span_in_context
@@ -41,28 +41,28 @@ def test_propagation(self):
)
tracestate_value = "foo=1,bar=2,baz=3"
headers = {
- "otcorrelationcontext": ["key1=val1,key2=val2"],
+ "otcorrelations": ["key1=val1,key2=val2"],
"traceparent": [traceparent_value],
"tracestate": [tracestate_value],
}
ctx = extract(get_as_list, headers)
- correlations = correlationcontext.get_correlations(context=ctx)
+ baggage_entries = baggage.get_all(context=ctx)
expected = {"key1": "val1", "key2": "val2"}
- self.assertEqual(correlations, expected)
+ self.assertEqual(baggage_entries, expected)
span_context = get_current_span(context=ctx).get_context()
self.assertEqual(span_context.trace_id, self.TRACE_ID)
self.assertEqual(span_context.span_id, self.SPAN_ID)
span = trace.DefaultSpan(span_context)
- ctx = correlationcontext.set_correlation("key3", "val3")
- ctx = correlationcontext.set_correlation("key4", "val4", context=ctx)
+ ctx = baggage.set_baggage("key3", "val3")
+ ctx = baggage.set_baggage("key4", "val4", context=ctx)
ctx = set_span_in_context(span, context=ctx)
output = {}
inject(dict.__setitem__, output, context=ctx)
self.assertEqual(traceparent_value, output["traceparent"])
- self.assertIn("key3=val3", output["otcorrelationcontext"])
- self.assertIn("key4=val4", output["otcorrelationcontext"])
+ self.assertIn("key3=val3", output["otcorrelations"])
+ self.assertIn("key4=val4", output["otcorrelations"])
self.assertIn("foo=1", output["tracestate"])
self.assertIn("bar=2", output["tracestate"])
self.assertIn("baz=3", output["tracestate"])
diff --git a/opentelemetry-api/tests/test_implementation.py b/opentelemetry-api/tests/test_implementation.py
index d0f9404a911..0d5b22b18f5 100644
--- a/opentelemetry-api/tests/test_implementation.py
+++ b/opentelemetry-api/tests/test_implementation.py
@@ -39,13 +39,13 @@ def test_default_tracer(self):
with tracer.start_span("test") as span:
self.assertEqual(span.get_context(), trace.INVALID_SPAN_CONTEXT)
self.assertEqual(span, trace.INVALID_SPAN)
- self.assertIs(span.is_recording_events(), False)
+ self.assertIs(span.is_recording(), False)
with tracer.start_span("test2") as span2:
self.assertEqual(
span2.get_context(), trace.INVALID_SPAN_CONTEXT
)
self.assertEqual(span2, trace.INVALID_SPAN)
- self.assertIs(span2.is_recording_events(), False)
+ self.assertIs(span2.is_recording(), False)
def test_span(self):
with self.assertRaises(TypeError):
@@ -55,7 +55,7 @@ def test_span(self):
def test_default_span(self):
span = trace.DefaultSpan(trace.INVALID_SPAN_CONTEXT)
self.assertEqual(span.get_context(), trace.INVALID_SPAN_CONTEXT)
- self.assertIs(span.is_recording_events(), False)
+ self.assertIs(span.is_recording(), False)
# METER
diff --git a/opentelemetry-api/tests/trace/propagation/test_tracecontexthttptextformat.py b/opentelemetry-api/tests/trace/propagation/test_tracecontexthttptextformat.py
index 5adc180d9fc..8abe4193873 100644
--- a/opentelemetry-api/tests/trace/propagation/test_tracecontexthttptextformat.py
+++ b/opentelemetry-api/tests/trace/propagation/test_tracecontexthttptextformat.py
@@ -16,9 +16,9 @@
import unittest
from opentelemetry import trace
-from opentelemetry.trace.propagation import tracecontexthttptextformat
+from opentelemetry.trace.propagation import tracecontext
-FORMAT = tracecontexthttptextformat.TraceContextHTTPTextFormat()
+FORMAT = tracecontext.TraceContextTextMapPropagator()
def get_as_list(
diff --git a/opentelemetry-api/tests/trace/test_defaultspan.py b/opentelemetry-api/tests/trace/test_defaultspan.py
index d27f2b1bbcd..67c2fc33521 100644
--- a/opentelemetry-api/tests/trace/test_defaultspan.py
+++ b/opentelemetry-api/tests/trace/test_defaultspan.py
@@ -32,4 +32,4 @@ def test_ctor(self):
def test_invalid_span(self):
self.assertIsNotNone(trace.INVALID_SPAN)
self.assertIsNotNone(trace.INVALID_SPAN.get_context())
- self.assertFalse(trace.INVALID_SPAN.get_context().is_valid())
+ self.assertFalse(trace.INVALID_SPAN.get_context().is_valid)
diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md
index a5fe59878bd..e31387a0c40 100644
--- a/opentelemetry-sdk/CHANGELOG.md
+++ b/opentelemetry-sdk/CHANGELOG.md
@@ -2,11 +2,25 @@
## Unreleased
+- Moved samplers from API to SDK
+ ([#1023](https://github.com/open-telemetry/opentelemetry-python/pull/1023))
+- Sampling spec changes
+ ([#1034](https://github.com/open-telemetry/opentelemetry-python/pull/1034))
+- Remove lazy Event and Link API from Span interface
+ ([#1045](https://github.com/open-telemetry/opentelemetry-python/pull/1045))
+- Improve BatchExportSpanProcessor
+ ([#1062](https://github.com/open-telemetry/opentelemetry-python/pull/1062))
+- Populate resource attributes as per semantic conventions
+ ([#1053](https://github.com/open-telemetry/opentelemetry-python/pull/1053))
+- Rename Resource labels to attributes
+ ([#1082](https://github.com/open-telemetry/opentelemetry-python/pull/1082))
+
## Version 0.12b0
Released 2020-08-14
- Changed default Sampler to `ParentOrElse(AlwaysOn)`
+ ([#960](https://github.com/open-telemetry/opentelemetry-python/pull/960))
- Update environment variable names, prefix changed from `OPENTELEMETRY` to `OTEL`
([#904](https://github.com/open-telemetry/opentelemetry-python/pull/904))
- Implement Views in metrics SDK
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py
index 2af8a551ee1..092f456fafe 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py
@@ -132,7 +132,7 @@ class Metric(metrics_api.Metric):
This is the class that is used to represent a metric that is to be
synchronously recorded and tracked. Synchronous instruments are called
inside a request, meaning they have an associated distributed context
- (i.e. Span context, correlation context). Multiple metric events may occur
+ (i.e. Span context, baggage). Multiple metric events may occur
for a synchronous instrument within a give collection interval.
Each metric has a set of bound metrics that are created from the metric.
@@ -478,7 +478,7 @@ class MeterProvider(metrics_api.MeterProvider):
def __init__(
self,
stateful=True,
- resource: Resource = Resource.create_empty(),
+ resource: Resource = Resource.create({}),
shutdown_on_exit: bool = True,
):
self.stateful = stateful
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py
index d9752e3b3cb..e14d7811684 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py
@@ -19,47 +19,65 @@
import typing
from json import dumps
+import pkg_resources
+
LabelValue = typing.Union[str, bool, int, float]
-Labels = typing.Dict[str, LabelValue]
+Attributes = typing.Dict[str, LabelValue]
logger = logging.getLogger(__name__)
+TELEMETRY_SDK_LANGUAGE = "telemetry.sdk.language"
+TELEMETRY_SDK_NAME = "telemetry.sdk.name"
+TELEMETRY_SDK_VERSION = "telemetry.sdk.version"
+
+OPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(
+ "opentelemetry-sdk"
+).version
+
+
class Resource:
- def __init__(self, labels: Labels):
- self._labels = labels.copy()
+ def __init__(self, attributes: Attributes):
+ self._attributes = attributes.copy()
@staticmethod
- def create(labels: Labels) -> "Resource":
- if not labels:
- return _EMPTY_RESOURCE
- return Resource(labels)
+ def create(attributes: Attributes) -> "Resource":
+ if not attributes:
+ return _DEFAULT_RESOURCE
+ return _DEFAULT_RESOURCE.merge(Resource(attributes))
@staticmethod
def create_empty() -> "Resource":
return _EMPTY_RESOURCE
@property
- def labels(self) -> Labels:
- return self._labels.copy()
+ def attributes(self) -> Attributes:
+ return self._attributes.copy()
def merge(self, other: "Resource") -> "Resource":
- merged_labels = self.labels
+ merged_attributes = self.attributes
# pylint: disable=protected-access
- for key, value in other._labels.items():
- if key not in merged_labels or merged_labels[key] == "":
- merged_labels[key] = value
- return Resource(merged_labels)
+ for key, value in other._attributes.items():
+ if key not in merged_attributes or merged_attributes[key] == "":
+ merged_attributes[key] = value
+ return Resource(merged_attributes)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Resource):
return False
- return self._labels == other._labels
+ return self._attributes == other._attributes
def __hash__(self):
- return hash(dumps(self._labels, sort_keys=True))
+ return hash(dumps(self._attributes, sort_keys=True))
_EMPTY_RESOURCE = Resource({})
+_DEFAULT_RESOURCE = Resource(
+ {
+ TELEMETRY_SDK_LANGUAGE: "python",
+ TELEMETRY_SDK_NAME: "opentelemetry",
+ TELEMETRY_SDK_VERSION: OPENTELEMETRY_SDK_VERSION,
+ }
+)
class ResourceDetector(abc.ABC):
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py
index 65a60025c89..13819ed35b0 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py
@@ -40,9 +40,10 @@
from opentelemetry import trace as trace_api
from opentelemetry.sdk import util
from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import sampling
from opentelemetry.sdk.util import BoundedDict, BoundedList
from opentelemetry.sdk.util.instrumentation import InstrumentationInfo
-from opentelemetry.trace import SpanContext, sampling
+from opentelemetry.trace import SpanContext
from opentelemetry.trace.propagation import SPAN_KEY
from opentelemetry.trace.status import Status, StatusCanonicalCode
from opentelemetry.util import time_ns, types
@@ -283,31 +284,6 @@ def attributes(self) -> types.Attributes:
return self._attributes
-class LazyEvent(EventBase):
- """A text annotation with a set of attributes.
-
- Args:
- name: Name of the event.
- event_formatter: Callable object that returns the attributes of the
- event.
- timestamp: Timestamp of the event. If `None` it will filled
- automatically.
- """
-
- def __init__(
- self,
- name: str,
- event_formatter: types.AttributesFormatter,
- timestamp: Optional[int] = None,
- ) -> None:
- super().__init__(name, timestamp)
- self._event_formatter = event_formatter
-
- @property
- def attributes(self) -> types.Attributes:
- return self._event_formatter()
-
-
def _is_valid_attribute_value(value: types.AttributeValue) -> bool:
"""Checks if attribute value is valid.
@@ -350,6 +326,16 @@ def _is_valid_attribute_value(value: types.AttributeValue) -> bool:
return True
+def _filter_attribute_values(attributes: types.Attributes):
+ if attributes:
+ for attr_key, attr_value in list(attributes.items()):
+ if _is_valid_attribute_value(attr_value):
+ if isinstance(attr_value, MutableSequence):
+ attributes[attr_key] = tuple(attr_value)
+ else:
+ attributes.pop(attr_key)
+
+
class Span(trace_api.Span):
"""See `opentelemetry.trace.Span`.
@@ -378,7 +364,7 @@ def __init__(
parent: Optional[trace_api.SpanContext] = None,
sampler: Optional[sampling.Sampler] = None,
trace_config: None = None, # TODO
- resource: Resource = Resource.create_empty(),
+ resource: Resource = Resource.create({}),
attributes: types.Attributes = None, # TODO
events: Sequence[Event] = None, # TODO
links: Sequence[trace_api.Link] = (),
@@ -401,7 +387,7 @@ def __init__(
self.status = None
self._lock = threading.Lock()
- self._filter_attribute_values(attributes)
+ _filter_attribute_values(attributes)
if not attributes:
self.attributes = self._new_attributes()
else:
@@ -412,7 +398,7 @@ def __init__(
self.events = self._new_events()
if events:
for event in events:
- self._filter_attribute_values(event.attributes)
+ _filter_attribute_values(event.attributes)
self.events.append(event)
if links is None:
@@ -520,7 +506,7 @@ def to_json(self, indent=4):
f_span["attributes"] = self._format_attributes(self.attributes)
f_span["events"] = self._format_events(self.events)
f_span["links"] = self._format_links(self.links)
- f_span["resource"] = self.resource.labels
+ f_span["resource"] = self.resource.attributes
return json.dumps(f_span, indent=indent)
@@ -529,7 +515,7 @@ def get_context(self):
def set_attribute(self, key: str, value: types.AttributeValue) -> None:
with self._lock:
- if not self.is_recording_events():
+ if not self.is_recording():
return
has_ended = self.end_time is not None
if has_ended:
@@ -553,21 +539,9 @@ def set_attribute(self, key: str, value: types.AttributeValue) -> None:
with self._lock:
self.attributes[key] = value
- @staticmethod
- def _filter_attribute_values(attributes: types.Attributes):
- if attributes:
- for attr_key, attr_value in list(attributes.items()):
- if _is_valid_attribute_value(attr_value):
- if isinstance(attr_value, MutableSequence):
- attributes[attr_key] = tuple(attr_value)
- else:
- attributes[attr_key] = attr_value
- else:
- attributes.pop(attr_key)
-
def _add_event(self, event: EventBase) -> None:
with self._lock:
- if not self.is_recording_events():
+ if not self.is_recording():
return
has_ended = self.end_time is not None
@@ -582,7 +556,7 @@ def add_event(
attributes: types.Attributes = None,
timestamp: Optional[int] = None,
) -> None:
- self._filter_attribute_values(attributes)
+ _filter_attribute_values(attributes)
if not attributes:
attributes = self._new_attributes()
self._add_event(
@@ -593,23 +567,9 @@ def add_event(
)
)
- def add_lazy_event(
- self,
- name: str,
- event_formatter: types.AttributesFormatter,
- timestamp: Optional[int] = None,
- ) -> None:
- self._add_event(
- LazyEvent(
- name=name,
- event_formatter=event_formatter,
- timestamp=time_ns() if timestamp is None else timestamp,
- )
- )
-
def start(self, start_time: Optional[int] = None) -> None:
with self._lock:
- if not self.is_recording_events():
+ if not self.is_recording():
return
has_started = self.start_time is not None
if not has_started:
@@ -623,7 +583,7 @@ def start(self, start_time: Optional[int] = None) -> None:
def end(self, end_time: Optional[int] = None) -> None:
with self._lock:
- if not self.is_recording_events():
+ if not self.is_recording():
return
if self.start_time is None:
raise RuntimeError("Calling end() on a not started span.")
@@ -650,7 +610,7 @@ def update_name(self, name: str) -> None:
return
self.name = name
- def is_recording_events(self) -> bool:
+ def is_recording(self) -> bool:
return True
def set_status(self, status: trace_api.Status) -> None:
@@ -743,7 +703,7 @@ def start_as_current_span(
name: str,
parent: trace_api.ParentSpan = trace_api.Tracer.CURRENT_SPAN,
kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL,
- attributes: Optional[types.Attributes] = None,
+ attributes: types.Attributes = None,
links: Sequence[trace_api.Link] = (),
) -> Iterator[trace_api.Span]:
span = self.start_span(name, parent, kind, attributes, links)
@@ -754,7 +714,7 @@ def start_span( # pylint: disable=too-many-locals
name: str,
parent: trace_api.ParentSpan = trace_api.Tracer.CURRENT_SPAN,
kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL,
- attributes: Optional[types.Attributes] = None,
+ attributes: types.Attributes = None,
links: Sequence[trace_api.Link] = (),
start_time: Optional[int] = None,
set_status_on_exception: bool = True,
@@ -771,7 +731,7 @@ def start_span( # pylint: disable=too-many-locals
):
raise TypeError("parent must be a Span, SpanContext or None.")
- if parent_context is None or not parent_context.is_valid():
+ if parent_context is None or not parent_context.is_valid:
parent = parent_context = None
trace_id = generate_trace_id()
trace_flags = None
@@ -781,6 +741,20 @@ def start_span( # pylint: disable=too-many-locals
trace_flags = parent_context.trace_flags
trace_state = parent_context.trace_state
+ # The sampler decides whether to create a real or no-op span at the
+ # time of span creation. No-op spans do not record events, and are not
+ # exported.
+ # The sampler may also add attributes to the newly-created span, e.g.
+ # to include information about the sampling result.
+ sampling_result = self.source.sampler.should_sample(
+ parent_context, trace_id, name, attributes, links,
+ )
+
+ trace_flags = (
+ trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED)
+ if sampling_result.decision.is_sampled()
+ else trace_api.TraceFlags(trace_api.TraceFlags.DEFAULT)
+ )
context = trace_api.SpanContext(
trace_id,
generate_span_id(),
@@ -789,29 +763,8 @@ def start_span( # pylint: disable=too-many-locals
trace_state=trace_state,
)
- # The sampler decides whether to create a real or no-op span at the
- # time of span creation. No-op spans do not record events, and are not
- # exported.
- # The sampler may also add attributes to the newly-created span, e.g.
- # to include information about the sampling decision.
- sampling_decision = self.source.sampler.should_sample(
- parent_context,
- context.trace_id,
- context.span_id,
- name,
- attributes,
- links,
- )
-
- if sampling_decision.sampled:
- options = context.trace_flags | trace_api.TraceFlags.SAMPLED
- context.trace_flags = trace_api.TraceFlags(options)
- if attributes is None:
- span_attributes = sampling_decision.attributes
- else:
- # apply sampling decision attributes after initial attributes
- span_attributes = attributes.copy()
- span_attributes.update(sampling_decision.attributes)
+ # Only record if is_recording() is true
+ if sampling_result.decision.is_recording():
# pylint:disable=protected-access
span = Span(
name=name,
@@ -819,7 +772,7 @@ def start_span( # pylint: disable=too-many-locals
parent=parent_context,
sampler=self.source.sampler,
resource=self.source.resource,
- attributes=span_attributes,
+ attributes=sampling_result.attributes.copy(),
span_processor=self.source._active_span_processor,
kind=kind,
links=links,
@@ -867,8 +820,8 @@ def use_span(
class TracerProvider(trace_api.TracerProvider):
def __init__(
self,
- sampler: sampling.Sampler = trace_api.sampling.DEFAULT_ON,
- resource: Resource = Resource.create_empty(),
+ sampler: sampling.Sampler = sampling.DEFAULT_ON,
+ resource: Resource = Resource.create({}),
shutdown_on_exit: bool = True,
active_span_processor: Union[
SynchronousMultiSpanProcessor, ConcurrentMultiSpanProcessor
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py
index 9fe55ed7fd4..857537b90a1 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py
@@ -20,8 +20,8 @@
import typing
from enum import Enum
-from opentelemetry.context import attach, detach, get_current, set_value
-from opentelemetry.trace import DefaultSpan
+from opentelemetry.context import attach, detach, set_value
+from opentelemetry.sdk.trace import sampling
from opentelemetry.util import time_ns
from .. import Span, SpanProcessor
@@ -75,6 +75,8 @@ def on_start(self, span: Span) -> None:
pass
def on_end(self, span: Span) -> None:
+ if not span.context.trace_flags.sampled:
+ return
token = attach(set_value("suppress_instrumentation", True))
try:
self.span_exporter.export((span,))
@@ -91,6 +93,16 @@ def force_flush(self, timeout_millis: int = 30000) -> bool:
return True
+class _FlushRequest:
+ """Represents a request for the BatchExportSpanProcessor to flush spans."""
+
+ __slots__ = ["event", "num_spans"]
+
+ def __init__(self):
+ self.event = threading.Event()
+ self.num_spans = 0
+
+
class BatchExportSpanProcessor(SpanProcessor):
"""Batch span processor implementation.
@@ -98,8 +110,6 @@ class BatchExportSpanProcessor(SpanProcessor):
batches ended spans and pushes them to the configured `SpanExporter`.
"""
- _FLUSH_TOKEN_SPAN = DefaultSpan(context=None)
-
def __init__(
self,
span_exporter: SpanExporter,
@@ -129,9 +139,7 @@ def __init__(
) # type: typing.Deque[Span]
self.worker_thread = threading.Thread(target=self.worker, daemon=True)
self.condition = threading.Condition(threading.Lock())
- self.flush_condition = threading.Condition(threading.Lock())
- # flag to indicate that there is a flush operation on progress
- self._flushing = False
+ self._flush_request = None # type: typing.Optional[_FlushRequest]
self.schedule_delay_millis = schedule_delay_millis
self.max_export_batch_size = max_export_batch_size
self.max_queue_size = max_queue_size
@@ -151,6 +159,8 @@ def on_end(self, span: Span) -> None:
if self.done:
logger.warning("Already shutdown, dropping span.")
return
+ if not span.context.trace_flags.sampled:
+ return
if len(self.queue) == self.max_queue_size:
if not self._spans_dropped:
logger.warning("Queue is full, likely spans will be dropped.")
@@ -164,60 +174,128 @@ def on_end(self, span: Span) -> None:
def worker(self):
timeout = self.schedule_delay_millis / 1e3
+ flush_request = None # type: typing.Optional[_FlushRequest]
while not self.done:
- if (
- len(self.queue) < self.max_export_batch_size
- and not self._flushing
- ):
- with self.condition:
+ with self.condition:
+ if self.done:
+ # done flag may have changed, avoid waiting
+ break
+ flush_request = self._get_and_unset_flush_request()
+ if (
+ len(self.queue) < self.max_export_batch_size
+ and flush_request is None
+ ):
+
self.condition.wait(timeout)
+ flush_request = self._get_and_unset_flush_request()
if not self.queue:
# spurious notification, let's wait again
+ self._notify_flush_request_finished(flush_request)
+ flush_request = None
continue
if self.done:
# missing spans will be sent when calling flush
break
- # substract the duration of this export call to the next timeout
+ # subtract the duration of this export call to the next timeout
start = time_ns()
- self.export()
+ self._export(flush_request)
end = time_ns()
duration = (end - start) / 1e9
timeout = self.schedule_delay_millis / 1e3 - duration
+ self._notify_flush_request_finished(flush_request)
+ flush_request = None
+
+ # there might have been a new flush request while export was running
+ # and before the done flag switched to true
+ with self.condition:
+ shutdown_flush_request = self._get_and_unset_flush_request()
+
# be sure that all spans are sent
self._drain_queue()
+ self._notify_flush_request_finished(flush_request)
+ self._notify_flush_request_finished(shutdown_flush_request)
+
+ def _get_and_unset_flush_request(self,) -> typing.Optional[_FlushRequest]:
+ """Returns the current flush request and makes it invisible to the
+ worker thread for subsequent calls.
+ """
+ flush_request = self._flush_request
+ self._flush_request = None
+ if flush_request is not None:
+ flush_request.num_spans = len(self.queue)
+ return flush_request
+
+ @staticmethod
+ def _notify_flush_request_finished(
+ flush_request: typing.Optional[_FlushRequest],
+ ):
+ """Notifies the flush initiator(s) waiting on the given request/event
+ that the flush operation was finished.
+ """
+ if flush_request is not None:
+ flush_request.event.set()
- def export(self) -> None:
- """Exports at most max_export_batch_size spans."""
+ def _get_or_create_flush_request(self) -> _FlushRequest:
+ """Either returns the current active flush event or creates a new one.
+
+ The flush event will be visible and read by the worker thread before an
+ export operation starts. Callers of a flush operation may wait on the
+ returned event to be notified when the flush/export operation was
+ finished.
+
+ This method is not thread-safe, i.e. callers need to take care about
+ synchronization/locking.
+ """
+ if self._flush_request is None:
+ self._flush_request = _FlushRequest()
+ return self._flush_request
+
+ def _export(self, flush_request: typing.Optional[_FlushRequest]):
+ """Exports spans considering the given flush_request.
+
+ In case of a given flush_requests spans are exported in batches until
+ the number of exported spans reached or exceeded the number of spans in
+ the flush request.
+ In no flush_request was given at most max_export_batch_size spans are
+ exported.
+ """
+ if not flush_request:
+ self._export_batch()
+ return
+
+ num_spans = flush_request.num_spans
+ while self.queue:
+ num_exported = self._export_batch()
+ num_spans -= num_exported
+
+ if num_spans <= 0:
+ break
+
+ def _export_batch(self) -> int:
+ """Exports at most max_export_batch_size spans and returns the number of
+ exported spans.
+ """
idx = 0
- notify_flush = False
# currently only a single thread acts as consumer, so queue.pop() will
# not raise an exception
while idx < self.max_export_batch_size and self.queue:
- span = self.queue.pop()
- if span is self._FLUSH_TOKEN_SPAN:
- notify_flush = True
- else:
- self.spans_list[idx] = span
- idx += 1
+ self.spans_list[idx] = self.queue.pop()
+ idx += 1
token = attach(set_value("suppress_instrumentation", True))
try:
# Ignore type b/c the Optional[None]+slicing is too "clever"
# for mypy
self.span_exporter.export(self.spans_list[:idx]) # type: ignore
- # pylint: disable=broad-except
- except Exception:
+ except Exception: # pylint: disable=broad-except
logger.exception("Exception while exporting Span batch.")
detach(token)
- if notify_flush:
- with self.flush_condition:
- self.flush_condition.notify()
-
# clean up list
for index in range(idx):
self.spans_list[index] = None
+ return idx
def _drain_queue(self):
""""Export all elements until queue is empty.
@@ -226,26 +304,20 @@ def _drain_queue(self):
`export` that is not thread safe.
"""
while self.queue:
- self.export()
+ self._export_batch()
def force_flush(self, timeout_millis: int = 30000) -> bool:
if self.done:
logger.warning("Already shutdown, ignoring call to force_flush().")
return True
- self._flushing = True
- self.queue.appendleft(self._FLUSH_TOKEN_SPAN)
-
- # wake up worker thread
with self.condition:
+ flush_request = self._get_or_create_flush_request()
+ # signal the worker thread to flush and wait for it to finish
self.condition.notify_all()
# wait for token to be processed
- with self.flush_condition:
- ret = self.flush_condition.wait(timeout_millis / 1e3)
-
- self._flushing = False
-
+ ret = flush_request.event.wait(timeout_millis / 1e3)
if not ret:
logger.warning("Timeout was exceeded in force_flush().")
return ret
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py
index 901a5772f83..f6d3345ed77 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py
@@ -18,15 +18,15 @@
import opentelemetry.trace as trace
from opentelemetry.context import Context
from opentelemetry.sdk.trace import generate_span_id, generate_trace_id
-from opentelemetry.trace.propagation.httptextformat import (
+from opentelemetry.trace.propagation.textmap import (
Getter,
- HTTPTextFormat,
- HTTPTextFormatT,
Setter,
+ TextMapPropagator,
+ TextMapPropagatorT,
)
-class B3Format(HTTPTextFormat):
+class B3Format(TextMapPropagator):
"""Propagator for the B3 HTTP header format.
See: https://github.com/openzipkin/b3-propagation
@@ -44,8 +44,8 @@ class B3Format(HTTPTextFormat):
def extract(
self,
- get_from_carrier: Getter[HTTPTextFormatT],
- carrier: HTTPTextFormatT,
+ get_from_carrier: Getter[TextMapPropagatorT],
+ carrier: TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> Context:
trace_id = format_trace_id(trace.INVALID_TRACE_ID)
@@ -134,8 +134,8 @@ def extract(
def inject(
self,
- set_in_carrier: Setter[HTTPTextFormatT],
- carrier: HTTPTextFormatT,
+ set_in_carrier: Setter[TextMapPropagatorT],
+ carrier: TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> None:
span = trace.get_current_span(context=context)
@@ -170,8 +170,8 @@ def format_span_id(span_id: int) -> str:
def _extract_first_element(
- items: typing.Iterable[HTTPTextFormatT],
-) -> typing.Optional[HTTPTextFormatT]:
+ items: typing.Iterable[TextMapPropagatorT],
+) -> typing.Optional[TextMapPropagatorT]:
if items is None:
return None
return next(iter(items), None)
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/sampling.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/sampling.py
new file mode 100644
index 00000000000..2a1d6e89db3
--- /dev/null
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/sampling.py
@@ -0,0 +1,259 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+For general information about sampling, see `the specification `_.
+
+OpenTelemetry provides two types of samplers:
+
+- `StaticSampler`
+- `TraceIdRatioBased`
+
+A `StaticSampler` always returns the same sampling result regardless of the conditions. Both possible StaticSamplers are already created:
+
+- Always sample spans: ALWAYS_ON
+- Never sample spans: ALWAYS_OFF
+
+A `TraceIdRatioBased` sampler makes a random sampling result based on the sampling probability given.
+
+If the span being sampled has a parent, `ParentBased` will respect the parent span's sampling result. Otherwise, it returns the sampling result from the given delegate sampler.
+
+Currently, sampling results are always made during the creation of the span. However, this might not always be the case in the future (see `OTEP #115 `_).
+
+Custom samplers can be created by subclassing `Sampler` and implementing `Sampler.should_sample` as well as `Sampler.get_description`.
+
+To use a sampler, pass it into the tracer provider constructor. For example:
+
+.. code:: python
+
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import (
+ ConsoleSpanExporter,
+ SimpleExportSpanProcessor,
+ )
+ from opentelemetry.sdk.trace.sampling import TraceIdRatioBased
+
+ # sample 1 in every 1000 traces
+ sampler = TraceIdRatioBased(1/1000)
+
+ # set the sampler onto the global tracer provider
+ trace.set_tracer_provider(TracerProvider(sampler=sampler))
+
+ # set up an exporter for sampled spans
+ trace.get_tracer_provider().add_span_processor(
+ SimpleExportSpanProcessor(ConsoleSpanExporter())
+ )
+
+ # created spans will now be sampled by the TraceIdRatioBased sampler
+ with trace.get_tracer(__name__).start_as_current_span("Test Span"):
+ ...
+"""
+import abc
+import enum
+from types import MappingProxyType
+from typing import Optional, Sequence
+
+# pylint: disable=unused-import
+from opentelemetry.trace import Link, SpanContext
+from opentelemetry.util.types import Attributes
+
+
+class Decision(enum.Enum):
+ # IsRecording() == false, span will not be recorded and all events and attributes will be dropped.
+ NOT_RECORD = 0
+ # IsRecording() == true, but Sampled flag MUST NOT be set.
+ RECORD = 1
+ # IsRecording() == true AND Sampled flag` MUST be set.
+ RECORD_AND_SAMPLED = 2
+
+ def is_recording(self):
+ return self in (Decision.RECORD, Decision.RECORD_AND_SAMPLED)
+
+ def is_sampled(self):
+ return self is Decision.RECORD_AND_SAMPLED
+
+
+class SamplingResult:
+ """A sampling result as applied to a newly-created Span.
+
+ Args:
+ decision: A sampling decision based off of whether the span is recorded
+ and the sampled flag in trace flags in the span context.
+ attributes: Attributes to add to the `opentelemetry.trace.Span`.
+ """
+
+ def __repr__(self) -> str:
+ return "{}({}, attributes={})".format(
+ type(self).__name__, str(self.decision), str(self.attributes)
+ )
+
+ def __init__(
+ self, decision: Decision, attributes: Attributes = None,
+ ) -> None:
+ self.decision = decision
+ if attributes is None:
+ self.attributes = MappingProxyType({})
+ else:
+ self.attributes = MappingProxyType(attributes)
+
+
+class Sampler(abc.ABC):
+ @abc.abstractmethod
+ def should_sample(
+ self,
+ parent_context: Optional["SpanContext"],
+ trace_id: int,
+ name: str,
+ attributes: Attributes = None,
+ links: Sequence["Link"] = (),
+ ) -> "SamplingResult":
+ pass
+
+ @abc.abstractmethod
+ def get_description(self) -> str:
+ pass
+
+
+class StaticSampler(Sampler):
+ """Sampler that always returns the same decision."""
+
+ def __init__(self, decision: "Decision"):
+ self._decision = decision
+
+ def should_sample(
+ self,
+ parent_context: Optional["SpanContext"],
+ trace_id: int,
+ name: str,
+ attributes: Attributes = None,
+ links: Sequence["Link"] = (),
+ ) -> "SamplingResult":
+ if self._decision is Decision.NOT_RECORD:
+ return SamplingResult(self._decision)
+ return SamplingResult(self._decision, attributes)
+
+ def get_description(self) -> str:
+ if self._decision is Decision.NOT_RECORD:
+ return "AlwaysOffSampler"
+ return "AlwaysOnSampler"
+
+
+class TraceIdRatioBased(Sampler):
+ """
+ Sampler that makes sampling decisions probabalistically based on `rate`,
+ while also respecting the parent span sampling decision.
+
+ Args:
+ rate: Probability (between 0 and 1) that a span will be sampled
+ """
+
+ def __init__(self, rate: float):
+ if rate < 0.0 or rate > 1.0:
+ raise ValueError("Probability must be in range [0.0, 1.0].")
+ self._rate = rate
+ self._bound = self.get_bound_for_rate(self._rate)
+
+ # For compatibility with 64 bit trace IDs, the sampler checks the 64
+ # low-order bits of the trace ID to decide whether to sample a given trace.
+ TRACE_ID_LIMIT = (1 << 64) - 1
+
+ @classmethod
+ def get_bound_for_rate(cls, rate: float) -> int:
+ return round(rate * (cls.TRACE_ID_LIMIT + 1))
+
+ @property
+ def rate(self) -> float:
+ return self._rate
+
+ @rate.setter
+ def rate(self, new_rate: float) -> None:
+ self._rate = new_rate
+ self._bound = self.get_bound_for_rate(self._rate)
+
+ @property
+ def bound(self) -> int:
+ return self._bound
+
+ def should_sample(
+ self,
+ parent_context: Optional["SpanContext"],
+ trace_id: int,
+ name: str,
+ attributes: Attributes = None, # TODO
+ links: Sequence["Link"] = (),
+ ) -> "SamplingResult":
+ decision = Decision.NOT_RECORD
+ if trace_id & self.TRACE_ID_LIMIT < self.bound:
+ decision = Decision.RECORD_AND_SAMPLED
+ if decision is Decision.NOT_RECORD:
+ return SamplingResult(decision)
+ return SamplingResult(decision, attributes)
+
+ def get_description(self) -> str:
+ return "TraceIdRatioBased{{{}}}".format(self._rate)
+
+
+class ParentBased(Sampler):
+ """
+ If a parent is set, follows the same sampling decision as the parent.
+ Otherwise, uses the delegate provided at initialization to make a
+ decision.
+
+ Args:
+ delegate: The delegate sampler to use if parent is not set.
+ """
+
+ def __init__(self, delegate: Sampler):
+ self._delegate = delegate
+
+ def should_sample(
+ self,
+ parent_context: Optional["SpanContext"],
+ trace_id: int,
+ name: str,
+ attributes: Attributes = None, # TODO
+ links: Sequence["Link"] = (),
+ ) -> "SamplingResult":
+ if parent_context is not None:
+ if (
+ not parent_context.is_valid
+ or not parent_context.trace_flags.sampled
+ ):
+ return SamplingResult(Decision.NOT_RECORD)
+ return SamplingResult(Decision.RECORD_AND_SAMPLED, attributes)
+
+ return self._delegate.should_sample(
+ parent_context=parent_context,
+ trace_id=trace_id,
+ name=name,
+ attributes=attributes,
+ links=links,
+ )
+
+ def get_description(self):
+ return "ParentBased{{{}}}".format(self._delegate.get_description())
+
+
+ALWAYS_OFF = StaticSampler(Decision.NOT_RECORD)
+"""Sampler that never samples spans, regardless of the parent span's sampling decision."""
+
+ALWAYS_ON = StaticSampler(Decision.RECORD_AND_SAMPLED)
+"""Sampler that always samples spans, regardless of the parent span's sampling decision."""
+
+DEFAULT_OFF = ParentBased(ALWAYS_OFF)
+"""Sampler that respects its parent span's sampling decision, but otherwise never samples."""
+
+DEFAULT_ON = ParentBased(ALWAYS_ON)
+"""Sampler that respects its parent span's sampling decision, but otherwise always samples."""
diff --git a/opentelemetry-sdk/tests/metrics/test_metrics.py b/opentelemetry-sdk/tests/metrics/test_metrics.py
index b854f2d5db9..01974765203 100644
--- a/opentelemetry-sdk/tests/metrics/test_metrics.py
+++ b/opentelemetry-sdk/tests/metrics/test_metrics.py
@@ -40,7 +40,7 @@ def test_resource_empty(self):
meter_provider = metrics.MeterProvider()
meter = meter_provider.get_meter(__name__)
# pylint: disable=protected-access
- self.assertIs(meter.resource, resources._EMPTY_RESOURCE)
+ self.assertIs(meter.resource, resources._DEFAULT_RESOURCE)
def test_start_pipeline(self):
exporter = mock.Mock()
diff --git a/opentelemetry-sdk/tests/resources/test_resources.py b/opentelemetry-sdk/tests/resources/test_resources.py
index 84d0cf2ae59..3166e3350ee 100644
--- a/opentelemetry-sdk/tests/resources/test_resources.py
+++ b/opentelemetry-sdk/tests/resources/test_resources.py
@@ -23,25 +23,35 @@
class TestResources(unittest.TestCase):
def test_create(self):
- labels = {
+ attributes = {
"service": "ui",
"version": 1,
"has_bugs": True,
"cost": 112.12,
}
- resource = resources.Resource.create(labels)
+ expected_attributes = {
+ "service": "ui",
+ "version": 1,
+ "has_bugs": True,
+ "cost": 112.12,
+ resources.TELEMETRY_SDK_NAME: "opentelemetry",
+ resources.TELEMETRY_SDK_LANGUAGE: "python",
+ resources.TELEMETRY_SDK_VERSION: resources.OPENTELEMETRY_SDK_VERSION,
+ }
+
+ resource = resources.Resource.create(attributes)
self.assertIsInstance(resource, resources.Resource)
- self.assertEqual(resource.labels, labels)
+ self.assertEqual(resource.attributes, expected_attributes)
resource = resources.Resource.create_empty()
self.assertIs(resource, resources._EMPTY_RESOURCE)
resource = resources.Resource.create(None)
- self.assertIs(resource, resources._EMPTY_RESOURCE)
+ self.assertIs(resource, resources._DEFAULT_RESOURCE)
resource = resources.Resource.create({})
- self.assertIs(resource, resources._EMPTY_RESOURCE)
+ self.assertIs(resource, resources._DEFAULT_RESOURCE)
def test_resource_merge(self):
left = resources.Resource({"service": "ui"})
@@ -54,7 +64,7 @@ def test_resource_merge(self):
def test_resource_merge_empty_string(self):
"""Verify Resource.merge behavior with the empty string.
- Labels from the source Resource take precedence, with
+ Attributes from the source Resource take precedence, with
the exception of the empty string.
"""
@@ -68,23 +78,30 @@ def test_resource_merge_empty_string(self):
)
def test_immutability(self):
- labels = {
+ attributes = {
"service": "ui",
"version": 1,
"has_bugs": True,
"cost": 112.12,
}
- labels_copy = labels.copy()
+ default_attributes = {
+ resources.TELEMETRY_SDK_NAME: "opentelemetry",
+ resources.TELEMETRY_SDK_LANGUAGE: "python",
+ resources.TELEMETRY_SDK_VERSION: resources.OPENTELEMETRY_SDK_VERSION,
+ }
+
+ attributes_copy = attributes.copy()
+ attributes_copy.update(default_attributes)
- resource = resources.Resource.create(labels)
- self.assertEqual(resource.labels, labels_copy)
+ resource = resources.Resource.create(attributes)
+ self.assertEqual(resource.attributes, attributes_copy)
- resource.labels["has_bugs"] = False
- self.assertEqual(resource.labels, labels_copy)
+ resource.attributes["has_bugs"] = False
+ self.assertEqual(resource.attributes, attributes_copy)
- labels["cost"] = 999.91
- self.assertEqual(resource.labels, labels_copy)
+ attributes["cost"] = 999.91
+ self.assertEqual(resource.attributes, attributes_copy)
def test_aggregated_resources_no_detectors(self):
aggregated_resources = resources.get_aggregated_resources([])
diff --git a/opentelemetry-sdk/tests/trace/export/test_export.py b/opentelemetry-sdk/tests/trace/export/test_export.py
index 43b7893951f..e6fcdb9c22c 100644
--- a/opentelemetry-sdk/tests/trace/export/test_export.py
+++ b/opentelemetry-sdk/tests/trace/export/test_export.py
@@ -13,8 +13,10 @@
# limitations under the License.
import os
+import threading
import time
import unittest
+from concurrent.futures import ThreadPoolExecutor
from logging import WARNING
from unittest import mock
@@ -31,11 +33,13 @@ def __init__(
destination,
max_export_batch_size=None,
export_timeout_millis=0.0,
+ export_event: threading.Event = None,
):
self.destination = destination
self.max_export_batch_size = max_export_batch_size
self.is_shutdown = False
self.export_timeout = export_timeout_millis / 1e3
+ self.export_event = export_event
def export(self, spans: trace.Span) -> export.SpanExportResult:
if (
@@ -45,6 +49,8 @@ def export(self, spans: trace.Span) -> export.SpanExportResult:
raise ValueError("Batch is too big")
time.sleep(self.export_timeout)
self.destination.extend(span.name for span in spans)
+ if self.export_event:
+ self.export_event.set()
return export.SpanExportResult.SUCCESS
def shutdown(self):
@@ -94,11 +100,35 @@ def test_simple_span_processor_no_context(self):
self.assertListEqual(["xxx", "bar", "foo"], spans_names_list)
+ def test_simple_span_processor_not_sampled(self):
+ tracer_provider = trace.TracerProvider(
+ sampler=trace.sampling.ALWAYS_OFF
+ )
+ tracer = tracer_provider.get_tracer(__name__)
+
+ spans_names_list = []
+
+ my_exporter = MySpanExporter(destination=spans_names_list)
+ span_processor = export.SimpleExportSpanProcessor(my_exporter)
+ tracer_provider.add_span_processor(span_processor)
+
+ with tracer.start_as_current_span("foo"):
+ with tracer.start_as_current_span("bar"):
+ with tracer.start_as_current_span("xxx"):
+ pass
+
+ self.assertListEqual([], spans_names_list)
+
def _create_start_and_end_span(name, span_processor):
span = trace.Span(
name,
- mock.Mock(spec=trace_api.SpanContext),
+ trace_api.SpanContext(
+ 0xDEADBEEF,
+ 0xDEADBEEF,
+ is_remote=False,
+ trace_flags=trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED),
+ ),
span_processor=span_processor,
)
span.start()
@@ -148,6 +178,42 @@ def test_flush(self):
span_processor.shutdown()
+ def test_flush_empty(self):
+ spans_names_list = []
+
+ my_exporter = MySpanExporter(destination=spans_names_list)
+ span_processor = export.BatchExportSpanProcessor(my_exporter)
+
+ self.assertTrue(span_processor.force_flush())
+
+ def test_flush_from_multiple_threads(self):
+ num_threads = 50
+ num_spans = 10
+
+ span_list = []
+
+ my_exporter = MySpanExporter(destination=span_list)
+ span_processor = export.BatchExportSpanProcessor(
+ my_exporter, max_queue_size=512, max_export_batch_size=128
+ )
+
+ def create_spans_and_flush(tno: int):
+ for span_idx in range(num_spans):
+ _create_start_and_end_span(
+ "Span {}-{}".format(tno, span_idx), span_processor
+ )
+ self.assertTrue(span_processor.force_flush())
+
+ with ThreadPoolExecutor(max_workers=num_threads) as executor:
+ future_list = []
+ for thread_no in range(num_threads):
+ future = executor.submit(create_spans_and_flush, thread_no)
+ future_list.append(future)
+
+ executor.shutdown()
+
+ self.assertEqual(num_threads * num_spans, len(span_list))
+
def test_flush_timeout(self):
spans_names_list = []
@@ -177,6 +243,7 @@ def test_batch_span_processor_lossless(self):
for _ in range(512):
_create_start_and_end_span("foo", span_processor)
+ time.sleep(1)
self.assertTrue(span_processor.force_flush())
self.assertEqual(len(spans_names_list), 512)
span_processor.shutdown()
@@ -205,21 +272,51 @@ def test_batch_span_processor_many_spans(self):
self.assertEqual(len(spans_names_list), 1024)
span_processor.shutdown()
+ def test_batch_span_processor_not_sampled(self):
+ tracer_provider = trace.TracerProvider(
+ sampler=trace.sampling.ALWAYS_OFF
+ )
+ tracer = tracer_provider.get_tracer(__name__)
+ spans_names_list = []
+
+ my_exporter = MySpanExporter(
+ destination=spans_names_list, max_export_batch_size=128
+ )
+ span_processor = export.BatchExportSpanProcessor(
+ my_exporter,
+ max_queue_size=256,
+ max_export_batch_size=64,
+ schedule_delay_millis=100,
+ )
+ tracer_provider.add_span_processor(span_processor)
+ with tracer.start_as_current_span("foo"):
+ pass
+ time.sleep(0.05) # give some time for the exporter to upload spans
+
+ self.assertTrue(span_processor.force_flush())
+ self.assertEqual(len(spans_names_list), 0)
+ span_processor.shutdown()
+
def test_batch_span_processor_scheduled_delay(self):
"""Test that spans are exported each schedule_delay_millis"""
spans_names_list = []
- my_exporter = MySpanExporter(destination=spans_names_list)
+ export_event = threading.Event()
+ my_exporter = MySpanExporter(
+ destination=spans_names_list, export_event=export_event
+ )
span_processor = export.BatchExportSpanProcessor(
- my_exporter, schedule_delay_millis=50
+ my_exporter, schedule_delay_millis=50,
)
# create single span
+ start_time = time.time()
_create_start_and_end_span("foo", span_processor)
- time.sleep(0.05 + 0.02)
- # span should be already exported
+ self.assertTrue(export_event.wait(2))
+ export_time = time.time()
self.assertEqual(len(spans_names_list), 1)
+ self.assertGreaterEqual((export_time - start_time) * 1e3, 50)
span_processor.shutdown()
diff --git a/opentelemetry-sdk/tests/trace/test_implementation.py b/opentelemetry-sdk/tests/trace/test_implementation.py
index cd53a643951..4582b069578 100644
--- a/opentelemetry-sdk/tests/trace/test_implementation.py
+++ b/opentelemetry-sdk/tests/trace/test_implementation.py
@@ -31,11 +31,11 @@ def test_tracer(self):
with tracer.start_span("test") as span:
self.assertNotEqual(span.get_context(), INVALID_SPAN_CONTEXT)
self.assertNotEqual(span, INVALID_SPAN)
- self.assertIs(span.is_recording_events(), True)
+ self.assertIs(span.is_recording(), True)
with tracer.start_span("test2") as span2:
self.assertNotEqual(span2.get_context(), INVALID_SPAN_CONTEXT)
self.assertNotEqual(span2, INVALID_SPAN)
- self.assertIs(span2.is_recording_events(), True)
+ self.assertIs(span2.is_recording(), True)
def test_span(self):
with self.assertRaises(Exception):
@@ -44,4 +44,4 @@ def test_span(self):
span = trace.Span("name", INVALID_SPAN_CONTEXT)
self.assertEqual(span.get_context(), INVALID_SPAN_CONTEXT)
- self.assertIs(span.is_recording_events(), True)
+ self.assertIs(span.is_recording(), True)
diff --git a/opentelemetry-api/tests/trace/test_sampling.py b/opentelemetry-sdk/tests/trace/test_sampling.py
similarity index 63%
rename from opentelemetry-api/tests/trace/test_sampling.py
rename to opentelemetry-sdk/tests/trace/test_sampling.py
index 0be222f3dc0..de1c019fb56 100644
--- a/opentelemetry-api/tests/trace/test_sampling.py
+++ b/opentelemetry-sdk/tests/trace/test_sampling.py
@@ -16,12 +16,49 @@
import unittest
from opentelemetry import trace
-from opentelemetry.trace import sampling
+from opentelemetry.sdk.trace import sampling
TO_DEFAULT = trace.TraceFlags(trace.TraceFlags.DEFAULT)
TO_SAMPLED = trace.TraceFlags(trace.TraceFlags.SAMPLED)
+class TestDecision(unittest.TestCase):
+ def test_is_recording(self):
+ self.assertTrue(
+ sampling.Decision.is_recording(sampling.Decision.RECORD)
+ )
+ self.assertTrue(
+ sampling.Decision.is_recording(
+ sampling.Decision.RECORD_AND_SAMPLED
+ )
+ )
+ self.assertFalse(
+ sampling.Decision.is_recording(sampling.Decision.NOT_RECORD)
+ )
+
+ def test_is_sampled(self):
+ self.assertFalse(
+ sampling.Decision.is_sampled(sampling.Decision.RECORD)
+ )
+ self.assertTrue(
+ sampling.Decision.is_sampled(sampling.Decision.RECORD_AND_SAMPLED)
+ )
+ self.assertFalse(
+ sampling.Decision.is_sampled(sampling.Decision.NOT_RECORD)
+ )
+
+
+class TestSamplingResult(unittest.TestCase):
+ def test_ctr(self):
+ attributes = {"asd": "test"}
+ result = sampling.SamplingResult(sampling.Decision.RECORD, attributes)
+ self.assertIs(result.decision, sampling.Decision.RECORD)
+ with self.assertRaises(TypeError):
+ result.attributes["test"] = "mess-this-up"
+ self.assertTrue(len(result.attributes), 1)
+ self.assertEqual(result.attributes["asd"], "test")
+
+
class TestSampler(unittest.TestCase):
def test_always_on(self):
no_record_always_on = sampling.ALWAYS_ON.should_sample(
@@ -30,10 +67,12 @@ def test_always_on(self):
),
0xDEADBEF1,
0xDEADBEF2,
- "unsampled parent, sampling on",
+ {"unsampled parent": "sampling on"},
+ )
+ self.assertTrue(no_record_always_on.decision.is_sampled())
+ self.assertEqual(
+ no_record_always_on.attributes, {"unsampled parent": "sampling on"}
)
- self.assertTrue(no_record_always_on.sampled)
- self.assertEqual(no_record_always_on.attributes, {})
sampled_always_on = sampling.ALWAYS_ON.should_sample(
trace.SpanContext(
@@ -41,10 +80,12 @@ def test_always_on(self):
),
0xDEADBEF1,
0xDEADBEF2,
- "sampled parent, sampling on",
+ {"sampled parent": "sampling on"},
+ )
+ self.assertTrue(no_record_always_on.decision.is_sampled())
+ self.assertEqual(
+ sampled_always_on.attributes, {"sampled parent": "sampling on"}
)
- self.assertTrue(sampled_always_on.sampled)
- self.assertEqual(sampled_always_on.attributes, {})
def test_always_off(self):
no_record_always_off = sampling.ALWAYS_OFF.should_sample(
@@ -55,7 +96,7 @@ def test_always_off(self):
0xDEADBEF2,
"unsampled parent, sampling off",
)
- self.assertFalse(no_record_always_off.sampled)
+ self.assertFalse(no_record_always_off.decision.is_sampled())
self.assertEqual(no_record_always_off.attributes, {})
sampled_always_on = sampling.ALWAYS_OFF.should_sample(
@@ -66,7 +107,7 @@ def test_always_off(self):
0xDEADBEF2,
"sampled parent, sampling off",
)
- self.assertFalse(sampled_always_on.sampled)
+ self.assertFalse(sampled_always_on.decision.is_sampled())
self.assertEqual(sampled_always_on.attributes, {})
def test_default_on(self):
@@ -78,7 +119,7 @@ def test_default_on(self):
0xDEADBEF2,
"unsampled parent, sampling on",
)
- self.assertFalse(no_record_default_on.sampled)
+ self.assertFalse(no_record_default_on.decision.is_sampled())
self.assertEqual(no_record_default_on.attributes, {})
sampled_default_on = sampling.DEFAULT_ON.should_sample(
@@ -87,10 +128,20 @@ def test_default_on(self):
),
0xDEADBEF1,
0xDEADBEF2,
- "sampled parent, sampling on",
+ {"sampled parent": "sampling on"},
+ )
+ self.assertTrue(sampled_default_on.decision.is_sampled())
+ self.assertEqual(
+ sampled_default_on.attributes, {"sampled parent": "sampling on"}
+ )
+
+ default_on = sampling.DEFAULT_ON.should_sample(
+ None, 0xDEADBEF1, 0xDEADBEF2, {"sampled parent": "sampling on"},
+ )
+ self.assertTrue(default_on.decision.is_sampled())
+ self.assertEqual(
+ default_on.attributes, {"sampled parent": "sampling on"}
)
- self.assertTrue(sampled_default_on.sampled)
- self.assertEqual(sampled_default_on.attributes, {})
def test_default_off(self):
no_record_default_off = sampling.DEFAULT_OFF.should_sample(
@@ -101,7 +152,7 @@ def test_default_off(self):
0xDEADBEF2,
"unsampled parent, sampling off",
)
- self.assertFalse(no_record_default_off.sampled)
+ self.assertFalse(no_record_default_off.decision.is_sampled())
self.assertEqual(no_record_default_off.attributes, {})
sampled_default_off = sampling.DEFAULT_OFF.should_sample(
@@ -110,70 +161,49 @@ def test_default_off(self):
),
0xDEADBEF1,
0xDEADBEF2,
- "sampled parent, sampling off",
+ {"sampled parent": "sampling on"},
+ )
+ self.assertTrue(sampled_default_off.decision.is_sampled())
+ self.assertEqual(
+ sampled_default_off.attributes, {"sampled parent": "sampling on"}
)
- self.assertTrue(sampled_default_off.sampled)
- self.assertEqual(sampled_default_off.attributes, {})
+
+ default_off = sampling.DEFAULT_OFF.should_sample(
+ None, 0xDEADBEF1, 0xDEADBEF2, "unsampled parent, sampling off",
+ )
+ self.assertFalse(default_off.decision.is_sampled())
+ self.assertEqual(default_off.attributes, {})
def test_probability_sampler(self):
- sampler = sampling.ProbabilitySampler(0.5)
+ sampler = sampling.TraceIdRatioBased(0.5)
# Check that we sample based on the trace ID if the parent context is
# null
self.assertTrue(
sampler.should_sample(
None, 0x7FFFFFFFFFFFFFFF, 0xDEADBEEF, "span name"
- ).sampled
+ ).decision.is_sampled()
)
self.assertFalse(
sampler.should_sample(
None, 0x8000000000000000, 0xDEADBEEF, "span name"
- ).sampled
- )
-
- # Check that the sampling decision matches the parent context if given,
- # and that the sampler ignores the trace ID
- self.assertFalse(
- sampler.should_sample(
- trace.SpanContext(
- 0xDEADBEF0,
- 0xDEADBEF1,
- is_remote=False,
- trace_flags=TO_DEFAULT,
- ),
- 0x7FFFFFFFFFFFFFFF,
- 0xDEADBEEF,
- "span name",
- ).sampled
- )
- self.assertTrue(
- sampler.should_sample(
- trace.SpanContext(
- 0xDEADBEF0,
- 0xDEADBEF1,
- is_remote=False,
- trace_flags=TO_SAMPLED,
- ),
- 0x8000000000000000,
- 0xDEADBEEF,
- "span name",
- ).sampled
+ ).decision.is_sampled()
)
def test_probability_sampler_zero(self):
- default_off = sampling.ProbabilitySampler(0.0)
+ default_off = sampling.TraceIdRatioBased(0.0)
self.assertFalse(
default_off.should_sample(
None, 0x0, 0xDEADBEEF, "span name"
- ).sampled
+ ).decision.is_sampled()
)
def test_probability_sampler_one(self):
- default_off = sampling.ProbabilitySampler(1.0)
+ default_off = sampling.TraceIdRatioBased(1.0)
self.assertTrue(
default_off.should_sample(
None, 0xFFFFFFFFFFFFFFFF, 0xDEADBEEF, "span name"
- ).sampled
+ ).decision.is_sampled()
)
def test_probability_sampler_limits(self):
@@ -181,19 +211,19 @@ def test_probability_sampler_limits(self):
# Sample one of every 2^64 (= 5e-20) traces. This is the lowest
# possible meaningful sampling rate, only traces with trace ID 0x0
# should get sampled.
- almost_always_off = sampling.ProbabilitySampler(2 ** -64)
+ almost_always_off = sampling.TraceIdRatioBased(2 ** -64)
self.assertTrue(
almost_always_off.should_sample(
None, 0x0, 0xDEADBEEF, "span name"
- ).sampled
+ ).decision.is_sampled()
)
self.assertFalse(
almost_always_off.should_sample(
None, 0x1, 0xDEADBEEF, "span name"
- ).sampled
+ ).decision.is_sampled()
)
self.assertEqual(
- sampling.ProbabilitySampler.get_bound_for_rate(2 ** -64), 0x1
+ sampling.TraceIdRatioBased.get_bound_for_rate(2 ** -64), 0x1
)
# Sample every trace with trace ID less than 0xffffffffffffffff. In
@@ -204,11 +234,11 @@ def test_probability_sampler_limits(self):
#
# 1 - sys.float_info.epsilon
- almost_always_on = sampling.ProbabilitySampler(1 - 2 ** -64)
+ almost_always_on = sampling.TraceIdRatioBased(1 - 2 ** -64)
self.assertTrue(
almost_always_on.should_sample(
None, 0xFFFFFFFFFFFFFFFE, 0xDEADBEEF, "span name"
- ).sampled
+ ).decision.is_sampled()
)
# These tests are logically consistent, but fail because of the float
@@ -224,19 +254,19 @@ def test_probability_sampler_limits(self):
# ).sampled
# )
# self.assertEqual(
- # sampling.ProbabilitySampler.get_bound_for_rate(1 - 2 ** -64)),
+ # sampling.TraceIdRatioBased.get_bound_for_rate(1 - 2 ** -64)),
# 0xFFFFFFFFFFFFFFFF,
# )
# Check that a sampler with the highest effective sampling rate < 1
# refuses to sample traces with trace ID 0xffffffffffffffff.
- almost_almost_always_on = sampling.ProbabilitySampler(
+ almost_almost_always_on = sampling.TraceIdRatioBased(
1 - sys.float_info.epsilon
)
self.assertFalse(
almost_almost_always_on.should_sample(
None, 0xFFFFFFFFFFFFFFFF, 0xDEADBEEF, "span name"
- ).sampled
+ ).decision.is_sampled()
)
# Check that the higest effective sampling rate is actually lower than
# the highest theoretical sampling rate. If this test fails the test
@@ -244,3 +274,35 @@ def test_probability_sampler_limits(self):
self.assertLess(
almost_almost_always_on.bound, 0xFFFFFFFFFFFFFFFF,
)
+
+ def test_parent_based(self):
+ sampler = sampling.ParentBased(sampling.ALWAYS_ON)
+ # Check that the sampling decision matches the parent context if given
+ self.assertFalse(
+ sampler.should_sample(
+ trace.SpanContext(
+ 0xDEADBEF0,
+ 0xDEADBEF1,
+ is_remote=False,
+ trace_flags=TO_DEFAULT,
+ ),
+ 0x7FFFFFFFFFFFFFFF,
+ 0xDEADBEEF,
+ "span name",
+ ).decision.is_sampled()
+ )
+
+ sampler2 = sampling.ParentBased(sampling.ALWAYS_OFF)
+ self.assertTrue(
+ sampler2.should_sample(
+ trace.SpanContext(
+ 0xDEADBEF0,
+ 0xDEADBEF1,
+ is_remote=False,
+ trace_flags=TO_SAMPLED,
+ ),
+ 0x8000000000000000,
+ 0xDEADBEEF,
+ "span name",
+ ).decision.is_sampled()
+ )
diff --git a/opentelemetry-sdk/tests/trace/test_trace.py b/opentelemetry-sdk/tests/trace/test_trace.py
index 56bb9cfa574..a6b4fa93e99 100644
--- a/opentelemetry-sdk/tests/trace/test_trace.py
+++ b/opentelemetry-sdk/tests/trace/test_trace.py
@@ -20,8 +20,8 @@
from opentelemetry import trace as trace_api
from opentelemetry.sdk import resources, trace
+from opentelemetry.sdk.trace import Resource, sampling
from opentelemetry.sdk.util.instrumentation import InstrumentationInfo
-from opentelemetry.trace import sampling
from opentelemetry.trace.status import StatusCanonicalCode
from opentelemetry.util import time_ns
@@ -140,6 +140,12 @@ def test_default_sampler(self):
child_span = tracer.start_span(name="child span", parent=root_span)
self.assertIsInstance(child_span, trace.Span)
self.assertTrue(root_span.context.trace_flags.sampled)
+ self.assertEqual(
+ root_span.get_context().trace_flags, trace_api.TraceFlags.SAMPLED
+ )
+ self.assertEqual(
+ child_span.get_context().trace_flags, trace_api.TraceFlags.SAMPLED
+ )
def test_sampler_no_sampling(self):
tracer_provider = trace.TracerProvider(sampling.ALWAYS_OFF)
@@ -151,6 +157,12 @@ def test_sampler_no_sampling(self):
self.assertIsInstance(root_span, trace_api.DefaultSpan)
child_span = tracer.start_span(name="child span", parent=root_span)
self.assertIsInstance(child_span, trace_api.DefaultSpan)
+ self.assertEqual(
+ root_span.get_context().trace_flags, trace_api.TraceFlags.DEFAULT
+ )
+ self.assertEqual(
+ child_span.get_context().trace_flags, trace_api.TraceFlags.DEFAULT
+ )
class TestSpanCreation(unittest.TestCase):
@@ -165,7 +177,7 @@ def test_start_span_invalid_spancontext(self):
new_span = tracer.start_span(
"root", parent=trace_api.INVALID_SPAN_CONTEXT
)
- self.assertTrue(new_span.context.is_valid())
+ self.assertTrue(new_span.context.is_valid)
self.assertIsNone(new_span.parent)
def test_instrumentation_info(self):
@@ -201,7 +213,7 @@ def test_invalid_instrumentation_info(self):
tracer1.instrumentation_info, InstrumentationInfo
)
span1 = tracer1.start_span("foo")
- self.assertTrue(span1.is_recording_events())
+ self.assertTrue(span1.is_recording())
self.assertEqual(tracer1.instrumentation_info.version, "")
self.assertEqual(
tracer1.instrumentation_info.name, "ERROR:MISSING MODULE NAME"
@@ -384,7 +396,7 @@ def test_default_span_resource(self):
tracer = tracer_provider.get_tracer(__name__)
span = tracer.start_span("root")
# pylint: disable=protected-access
- self.assertIs(span.resource, resources._EMPTY_RESOURCE)
+ self.assertIs(span.resource, resources._DEFAULT_RESOURCE)
def test_span_context_remote_flag(self):
tracer = new_tracer()
@@ -521,36 +533,25 @@ def test_check_attribute_helper(self):
self.assertTrue(trace._is_valid_attribute_value(15))
def test_sampling_attributes(self):
- decision_attributes = {
+ sampling_attributes = {
"sampler-attr": "sample-val",
"attr-in-both": "decision-attr",
}
tracer_provider = trace.TracerProvider(
- sampling.StaticSampler(
- sampling.Decision(sampled=True, attributes=decision_attributes)
- )
+ sampling.StaticSampler(sampling.Decision.RECORD_AND_SAMPLED,)
)
self.tracer = tracer_provider.get_tracer(__name__)
- with self.tracer.start_as_current_span("root2") as root:
- self.assertEqual(len(root.attributes), 2)
- self.assertEqual(root.attributes["sampler-attr"], "sample-val")
- self.assertEqual(root.attributes["attr-in-both"], "decision-attr")
-
- attributes = {
- "attr-key": "val",
- "attr-key2": "val2",
- "attr-in-both": "span-attr",
- }
with self.tracer.start_as_current_span(
- "root2", attributes=attributes
+ name="root2", attributes=sampling_attributes
) as root:
- self.assertEqual(len(root.attributes), 4)
- self.assertEqual(root.attributes["attr-key"], "val")
- self.assertEqual(root.attributes["attr-key2"], "val2")
+ self.assertEqual(len(root.attributes), 2)
self.assertEqual(root.attributes["sampler-attr"], "sample-val")
self.assertEqual(root.attributes["attr-in-both"], "decision-attr")
+ self.assertEqual(
+ root.get_context().trace_flags, trace_api.TraceFlags.SAMPLED
+ )
def test_events(self):
self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN)
@@ -572,13 +573,7 @@ def test_events(self):
mutable_list = ["original_contents"]
root.add_event("event3", {"name": mutable_list})
- def event_formatter():
- return {"name": "hello"}
-
- # lazy event
- root.add_lazy_event("event4", event_formatter, now)
-
- self.assertEqual(len(root.events), 5)
+ self.assertEqual(len(root.events), 4)
self.assertEqual(root.events[0].name, "event0")
self.assertEqual(root.events[0].attributes, {})
@@ -604,10 +599,6 @@ def event_formatter():
root.events[3].attributes, {"name": ("original_contents",)}
)
- self.assertEqual(root.events[4].name, "event4")
- self.assertEqual(root.events[4].attributes, {"name": "hello"})
- self.assertEqual(root.events[4].timestamp, now)
-
def test_invalid_event_attributes(self):
self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN)
@@ -634,23 +625,14 @@ def test_links(self):
span_id=trace.generate_span_id(),
is_remote=False,
)
- other_context3 = trace_api.SpanContext(
- trace_id=trace.generate_trace_id(),
- span_id=trace.generate_span_id(),
- is_remote=False,
- )
-
- def get_link_attributes():
- return {"component": "http"}
links = (
trace_api.Link(other_context1),
trace_api.Link(other_context2, {"name": "neighbor"}),
- trace_api.LazyLink(other_context3, get_link_attributes),
)
with self.tracer.start_as_current_span("root", links=links) as root:
- self.assertEqual(len(root.links), 3)
+ self.assertEqual(len(root.links), 2)
self.assertEqual(
root.links[0].context.trace_id, other_context1.trace_id
)
@@ -665,10 +647,6 @@ def get_link_attributes():
root.links[1].context.span_id, other_context2.span_id
)
self.assertEqual(root.links[1].attributes, {"name": "neighbor"})
- self.assertEqual(
- root.links[2].context.span_id, other_context3.span_id
- )
- self.assertEqual(root.links[2].attributes, {"component": "http"})
def test_update_name(self):
with self.tracer.start_as_current_span("root") as root:
@@ -944,6 +922,7 @@ def test_to_json(self):
trace_flags=trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED),
)
span = trace.Span("span-name", context)
+ span.resource = Resource({})
self.assertEqual(
span.to_json(),
diff --git a/tests/opentelemetry-docker-tests/tests/asyncpg/test_asyncpg_functional.py b/tests/opentelemetry-docker-tests/tests/asyncpg/test_asyncpg_functional.py
index 87382702f2a..cb9080e62c2 100644
--- a/tests/opentelemetry-docker-tests/tests/asyncpg/test_asyncpg_functional.py
+++ b/tests/opentelemetry-docker-tests/tests/asyncpg/test_asyncpg_functional.py
@@ -14,7 +14,7 @@
POSTGRES_USER = os.getenv("POSTGRESQL_HOST ", "testuser")
-def _await(coro):
+def async_call(coro):
loop = asyncio.get_event_loop()
return loop.run_until_complete(coro)
@@ -27,7 +27,7 @@ def setUpClass(cls):
cls._cursor = None
cls._tracer = cls.tracer_provider.get_tracer(__name__)
AsyncPGInstrumentor().instrument(tracer_provider=cls.tracer_provider)
- cls._connection = _await(
+ cls._connection = async_call(
asyncpg.connect(
database=POSTGRES_DB_NAME,
user=POSTGRES_USER,
@@ -42,7 +42,7 @@ def tearDownClass(cls):
AsyncPGInstrumentor().uninstrument()
def test_instrumented_execute_method_without_arguments(self, *_, **__):
- _await(self._connection.execute("SELECT 42;"))
+ async_call(self._connection.execute("SELECT 42;"))
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(
@@ -59,7 +59,7 @@ def test_instrumented_execute_method_without_arguments(self, *_, **__):
)
def test_instrumented_fetch_method_without_arguments(self, *_, **__):
- _await(self._connection.fetch("SELECT 42;"))
+ async_call(self._connection.fetch("SELECT 42;"))
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(
@@ -77,7 +77,7 @@ async def _transaction_execute():
async with self._connection.transaction():
await self._connection.execute("SELECT 42;")
- _await(_transaction_execute())
+ async_call(_transaction_execute())
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(3, len(spans))
@@ -124,7 +124,7 @@ async def _transaction_execute():
await self._connection.execute("SELECT 42::uuid;")
with self.assertRaises(asyncpg.CannotCoerceError):
- _await(_transaction_execute())
+ async_call(_transaction_execute())
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(3, len(spans))
@@ -167,7 +167,7 @@ async def _transaction_execute():
)
def test_instrumented_method_doesnt_capture_parameters(self, *_, **__):
- _await(self._connection.execute("SELECT $1;", "1"))
+ async_call(self._connection.execute("SELECT $1;", "1"))
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(
@@ -198,7 +198,7 @@ def setUpClass(cls):
AsyncPGInstrumentor(capture_parameters=True).instrument(
tracer_provider=cls.tracer_provider
)
- cls._connection = _await(
+ cls._connection = async_call(
asyncpg.connect(
database=POSTGRES_DB_NAME,
user=POSTGRES_USER,
@@ -213,7 +213,7 @@ def tearDownClass(cls):
AsyncPGInstrumentor().uninstrument()
def test_instrumented_execute_method_with_arguments(self, *_, **__):
- _await(self._connection.execute("SELECT $1;", "1"))
+ async_call(self._connection.execute("SELECT $1;", "1"))
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(
@@ -231,7 +231,7 @@ def test_instrumented_execute_method_with_arguments(self, *_, **__):
)
def test_instrumented_fetch_method_with_arguments(self, *_, **__):
- _await(self._connection.fetch("SELECT $1;", "1"))
+ async_call(self._connection.fetch("SELECT $1;", "1"))
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(
@@ -246,7 +246,7 @@ def test_instrumented_fetch_method_with_arguments(self, *_, **__):
)
def test_instrumented_executemany_method_with_arguments(self, *_, **__):
- _await(self._connection.executemany("SELECT $1;", [["1"], ["2"]]))
+ async_call(self._connection.executemany("SELECT $1;", [["1"], ["2"]]))
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(
@@ -262,7 +262,7 @@ def test_instrumented_executemany_method_with_arguments(self, *_, **__):
def test_instrumented_execute_interface_error_method(self, *_, **__):
with self.assertRaises(asyncpg.InterfaceError):
- _await(self._connection.execute("SELECT 42;", 1, 2, 3))
+ async_call(self._connection.execute("SELECT 42;", 1, 2, 3))
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(
diff --git a/tests/opentelemetry-docker-tests/tests/celery/test_celery_functional.py b/tests/opentelemetry-docker-tests/tests/celery/test_celery_functional.py
index 2714c8ee033..f18c6cdba14 100644
--- a/tests/opentelemetry-docker-tests/tests/celery/test_celery_functional.py
+++ b/tests/opentelemetry-docker-tests/tests/celery/test_celery_functional.py
@@ -13,11 +13,8 @@
# limitations under the License.
-import time
-
import celery
import pytest
-from celery import signals
from celery.exceptions import Retry
import opentelemetry.instrumentation.celery
diff --git a/tests/opentelemetry-docker-tests/tests/check_availability.py b/tests/opentelemetry-docker-tests/tests/check_availability.py
index 91b8e5539dc..30825721934 100644
--- a/tests/opentelemetry-docker-tests/tests/check_availability.py
+++ b/tests/opentelemetry-docker-tests/tests/check_availability.py
@@ -36,7 +36,7 @@
POSTGRES_USER = os.getenv("POSTGRESQL_HOST", "testuser")
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = int(os.getenv("REDIS_PORT ", "6379"))
-RETRY_COUNT = 5
+RETRY_COUNT = 8
RETRY_INTERVAL = 5 # Seconds
logger = logging.getLogger(__name__)
diff --git a/tests/opentelemetry-docker-tests/tests/mysql/test_mysql_functional.py b/tests/opentelemetry-docker-tests/tests/mysql/test_mysql_functional.py
index f2b07293bfc..4116f4a19e5 100644
--- a/tests/opentelemetry-docker-tests/tests/mysql/test_mysql_functional.py
+++ b/tests/opentelemetry-docker-tests/tests/mysql/test_mysql_functional.py
@@ -13,7 +13,6 @@
# limitations under the License.
import os
-import time
import mysql.connector
@@ -36,14 +35,6 @@ def setUpClass(cls):
cls._cursor = None
cls._tracer = cls.tracer_provider.get_tracer(__name__)
MySQLInstrumentor().instrument()
- cls._connection = mysql.connector.connect(
- user=MYSQL_USER,
- password=MYSQL_PASSWORD,
- host=MYSQL_HOST,
- port=MYSQL_PORT,
- database=MYSQL_DB_NAME,
- )
- cls._cursor = cls._connection.cursor()
@classmethod
def tearDownClass(cls):
@@ -51,6 +42,17 @@ def tearDownClass(cls):
cls._connection.close()
MySQLInstrumentor().uninstrument()
+ def setUp(self):
+ super().setUp()
+ self._connection = mysql.connector.connect(
+ user=MYSQL_USER,
+ password=MYSQL_PASSWORD,
+ host=MYSQL_HOST,
+ port=MYSQL_PORT,
+ database=MYSQL_DB_NAME,
+ )
+ self._cursor = self._connection.cursor()
+
def validate_spans(self):
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 2)
@@ -79,6 +81,23 @@ def test_execute(self):
self._cursor.execute("CREATE TABLE IF NOT EXISTS test (id INT)")
self.validate_spans()
+ def test_execute_with_connection_context_manager(self):
+ """Should create a child span for execute with connection context
+ """
+ with self._tracer.start_as_current_span("rootSpan"):
+ with self._connection as conn:
+ cursor = conn.cursor()
+ cursor.execute("CREATE TABLE IF NOT EXISTS test (id INT)")
+ self.validate_spans()
+
+ def test_execute_with_cursor_context_manager(self):
+ """Should create a child span for execute with cursor context
+ """
+ with self._tracer.start_as_current_span("rootSpan"):
+ with self._connection.cursor() as cursor:
+ cursor.execute("CREATE TABLE IF NOT EXISTS test (id INT)")
+ self.validate_spans()
+
def test_executemany(self):
"""Should create a child span for executemany
"""
diff --git a/tests/opentelemetry-docker-tests/tests/postgres/test_aiopg_functional.py b/tests/opentelemetry-docker-tests/tests/postgres/test_aiopg_functional.py
index 1762da1d097..9eb209636d9 100644
--- a/tests/opentelemetry-docker-tests/tests/postgres/test_aiopg_functional.py
+++ b/tests/opentelemetry-docker-tests/tests/postgres/test_aiopg_functional.py
@@ -13,7 +13,6 @@
# limitations under the License.
import asyncio
import os
-import time
import aiopg
import psycopg2
diff --git a/tests/opentelemetry-docker-tests/tests/postgres/test_psycopg_functional.py b/tests/opentelemetry-docker-tests/tests/postgres/test_psycopg_functional.py
index a8e07ddb27e..8a703b00944 100644
--- a/tests/opentelemetry-docker-tests/tests/postgres/test_psycopg_functional.py
+++ b/tests/opentelemetry-docker-tests/tests/postgres/test_psycopg_functional.py
@@ -13,7 +13,6 @@
# limitations under the License.
import os
-import time
import psycopg2
@@ -86,6 +85,24 @@ def test_execute(self):
)
self.validate_spans()
+ def test_execute_with_connection_context_manager(self):
+ """Should create a child span for execute with connection context
+ """
+ with self._tracer.start_as_current_span("rootSpan"):
+ with self._connection as conn:
+ cursor = conn.cursor()
+ cursor.execute("CREATE TABLE IF NOT EXISTS test (id INT)")
+ self.validate_spans()
+
+ def test_execute_with_cursor_context_manager(self):
+ """Should create a child span for execute with cursor context
+ """
+ with self._tracer.start_as_current_span("rootSpan"):
+ with self._connection.cursor() as cursor:
+ cursor.execute("CREATE TABLE IF NOT EXISTS test (id INT)")
+ self.validate_spans()
+ self.assertTrue(cursor.closed)
+
def test_executemany(self):
"""Should create a child span for executemany
"""
diff --git a/tests/opentelemetry-docker-tests/tests/pymysql/test_pymysql_functional.py b/tests/opentelemetry-docker-tests/tests/pymysql/test_pymysql_functional.py
index 1636f85fba5..7b0cb5b0c03 100644
--- a/tests/opentelemetry-docker-tests/tests/pymysql/test_pymysql_functional.py
+++ b/tests/opentelemetry-docker-tests/tests/pymysql/test_pymysql_functional.py
@@ -78,6 +78,14 @@ def test_execute(self):
self._cursor.execute("CREATE TABLE IF NOT EXISTS test (id INT)")
self.validate_spans()
+ def test_execute_with_cursor_context_manager(self):
+ """Should create a child span for execute with cursor context
+ """
+ with self._tracer.start_as_current_span("rootSpan"):
+ with self._connection.cursor() as cursor:
+ cursor.execute("CREATE TABLE IF NOT EXISTS test (id INT)")
+ self.validate_spans()
+
def test_executemany(self):
"""Should create a child span for executemany
"""
diff --git a/tests/util/src/opentelemetry/test/mock_httptextformat.py b/tests/util/src/opentelemetry/test/mock_textmap.py
similarity index 80%
rename from tests/util/src/opentelemetry/test/mock_httptextformat.py
rename to tests/util/src/opentelemetry/test/mock_textmap.py
index 76165c3e4b2..92c0f21f0ec 100644
--- a/tests/util/src/opentelemetry/test/mock_httptextformat.py
+++ b/tests/util/src/opentelemetry/test/mock_textmap.py
@@ -16,15 +16,15 @@
from opentelemetry import trace
from opentelemetry.context import Context, get_current
-from opentelemetry.trace.propagation.httptextformat import (
+from opentelemetry.trace.propagation.textmap import (
Getter,
- HTTPTextFormat,
- HTTPTextFormatT,
Setter,
+ TextMapPropagator,
+ TextMapPropagatorT,
)
-class NOOPHTTPTextFormat(HTTPTextFormat):
+class NOOPTextMapPropagator(TextMapPropagator):
"""A propagator that does not extract nor inject.
This class is useful for catching edge cases assuming
@@ -33,22 +33,22 @@ class NOOPHTTPTextFormat(HTTPTextFormat):
def extract(
self,
- get_from_carrier: Getter[HTTPTextFormatT],
- carrier: HTTPTextFormatT,
+ get_from_carrier: Getter[TextMapPropagatorT],
+ carrier: TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> Context:
return get_current()
def inject(
self,
- set_in_carrier: Setter[HTTPTextFormatT],
- carrier: HTTPTextFormatT,
+ set_in_carrier: Setter[TextMapPropagatorT],
+ carrier: TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> None:
return None
-class MockHTTPTextFormat(HTTPTextFormat):
+class MockTextMapPropagator(TextMapPropagator):
"""Mock propagator for testing purposes."""
TRACE_ID_KEY = "mock-traceid"
@@ -56,8 +56,8 @@ class MockHTTPTextFormat(HTTPTextFormat):
def extract(
self,
- get_from_carrier: Getter[HTTPTextFormatT],
- carrier: HTTPTextFormatT,
+ get_from_carrier: Getter[TextMapPropagatorT],
+ carrier: TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> Context:
trace_id_list = get_from_carrier(carrier, self.TRACE_ID_KEY)
@@ -78,8 +78,8 @@ def extract(
def inject(
self,
- set_in_carrier: Setter[HTTPTextFormatT],
- carrier: HTTPTextFormatT,
+ set_in_carrier: Setter[TextMapPropagatorT],
+ carrier: TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> None:
span = trace.get_current_span(context)
diff --git a/tox.ini b/tox.ini
index 91024f7f3d4..0fbaabce83c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -365,6 +365,10 @@ deps =
-c {toxinidir}/dev-requirements.txt
-r {toxinidir}/docs-requirements.txt
+commands_pre =
+ pip install -e {toxinidir}/opentelemetry-api \
+ -e {toxinidir}/opentelemetry-sdk
+
changedir = docs
commands =
@@ -430,4 +434,4 @@ commands =
pytest {posargs}
commands_post =
- docker-compose down
+ docker-compose down -v