diff --git a/.eslintrc.json b/.eslintrc.json
index 4a8615fdac0..e59703fb064 100644
--- a/.eslintrc.json
+++ b/.eslintrc.json
@@ -1,6 +1,6 @@
{
"parserOptions": {
- "ecmaVersion": 2020
+ "ecmaVersion": 2021
},
"extends": [
"eslint:recommended",
diff --git a/.github/workflows/all-green.yml b/.github/workflows/all-green.yml
new file mode 100644
index 00000000000..e83dfb04397
--- /dev/null
+++ b/.github/workflows/all-green.yml
@@ -0,0 +1,16 @@
+name: All Green
+on:
+ pull_request:
+
+jobs:
+
+ all-green:
+ runs-on: ubuntu-latest
+ permissions:
+ checks: read
+ contents: read
+ steps:
+ - uses: wechuli/allcheckspassed@v1
+ with:
+ retries: 20 # once per minute, some checks take up to 15 min
+ checks_exclude: devflow.*
diff --git a/.github/workflows/appsec.yml b/.github/workflows/appsec.yml
index aabc5be4d29..9e6ff9cb764 100644
--- a/.github/workflows/appsec.yml
+++ b/.github/workflows/appsec.yml
@@ -202,17 +202,22 @@ jobs:
- uses: codecov/codecov-action@v3
next:
+ strategy:
+ matrix:
+ version:
+ - 18
+ - latest
runs-on: ubuntu-latest
env:
PLUGINS: next
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/testagent/start
- - uses: ./.github/actions/node/setup
+ - uses: actions/setup-node@v3
+ with:
+ cache: yarn
+ node-version: ${{ matrix.version }}
- run: yarn install
- - uses: ./.github/actions/node/oldest
- - run: yarn test:appsec:plugins:ci
- - uses: ./.github/actions/node/latest
- run: yarn test:appsec:plugins:ci
- if: always()
uses: ./.github/actions/testagent/logs
diff --git a/.github/workflows/plugins.yml b/.github/workflows/plugins.yml
index c780d3d2cca..423948aa557 100644
--- a/.github/workflows/plugins.yml
+++ b/.github/workflows/plugins.yml
@@ -458,7 +458,7 @@ jobs:
runs-on: ubuntu-latest
services:
elasticsearch:
- image: docker.elastic.co/elasticsearch/elasticsearch:7.14.0
+ image: elasticsearch:7.14.0
env:
discovery.type: single-node
ports:
@@ -935,6 +935,11 @@ jobs:
# TODO: fix performance issues and test more Node versions
next:
+ strategy:
+ matrix:
+ version:
+ - 18
+ - latest
runs-on: ubuntu-latest
env:
PLUGINS: next
@@ -943,9 +948,6 @@ jobs:
- uses: ./.github/actions/testagent/start
- uses: ./.github/actions/node/setup
- run: yarn install
- - uses: ./.github/actions/node/oldest
- - run: yarn test:plugins:ci
- - uses: ./.github/actions/node/latest
- run: yarn test:plugins:ci
- if: always()
uses: ./.github/actions/testagent/logs
diff --git a/.github/workflows/project.yml b/.github/workflows/project.yml
index 6f3c3f7a8a0..258d827bdf2 100644
--- a/.github/workflows/project.yml
+++ b/.github/workflows/project.yml
@@ -112,3 +112,4 @@ jobs:
- run: yarn install
- run: yarn type:test
- run: yarn type:doc
+
diff --git a/.github/workflows/system-tests.yml b/.github/workflows/system-tests.yml
index b39d1b2f7d4..4a72cdcdb15 100644
--- a/.github/workflows/system-tests.yml
+++ b/.github/workflows/system-tests.yml
@@ -11,14 +11,32 @@ on:
- cron: '00 04 * * 2-6'
jobs:
+ get-essential-scenarios:
+ runs-on: ubuntu-latest
+ outputs:
+ scenario: ${{ steps.parse-yml.outputs.scenario }}
+ steps:
+ - name: Checkout system tests
+ uses: actions/checkout@v4
+ with:
+ repository: 'DataDog/system-tests'
+ - name: Get Essential Scenarios
+ id: parse-yml
+ run: yq -o tsv .TRACER_ESSENTIAL_SCENARIOS ./scenario_groups.yml | xargs node -p "x=process.argv;x.shift();x.push('CROSSED_TRACING_LIBRARIES');'scenario='+JSON.stringify(x)" >> $GITHUB_OUTPUT
+
+
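Reviewer note on the `parse-yml` step above: `yq -o tsv` emits the essential scenario names, `xargs` hands them to `node -p` as arguments, and the expression rewrites them into the `scenario=[...]` JSON form that `$GITHUB_OUTPUT` and `fromJson` expect for the matrix below. A minimal sketch of what the `node -p` expression computes, using hypothetical scenario names:

```js
// Stand-in for process.argv under `node -p`; argv[0] is the node binary path.
const x = ['/usr/bin/node', 'APPSEC_BLOCKING', 'GRAPHQL'] // hypothetical names
x.shift() // drop the binary path, keeping only the scenario names
x.push('CROSSED_TRACING_LIBRARIES') // always appended to the essential set
// `node -p` prints the expression result; here we log the same string:
console.log('scenario=' + JSON.stringify(x))
// scenario=["APPSEC_BLOCKING","GRAPHQL","CROSSED_TRACING_LIBRARIES"]
```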
system-tests:
runs-on: ubuntu-latest
+ needs:
+ - get-essential-scenarios
strategy:
matrix:
- include:
- - weblog-variant: express4
- - weblog-variant: express4-typescript
- - weblog-variant: nextjs
+ weblog-variant:
+ - express4
+ - express4-typescript
+ - nextjs
+ scenario: ${{fromJson(needs.get-essential-scenarios.outputs.scenario)}}
+
env:
TEST_LIBRARY: nodejs
WEBLOG_VARIANT: ${{ matrix.weblog-variant }}
@@ -29,39 +47,27 @@ jobs:
uses: actions/checkout@v4
with:
repository: 'DataDog/system-tests'
-
- name: Checkout dd-trace-js
uses: actions/checkout@v4
with:
path: 'binaries/dd-trace-js'
-
- name: Build weblog
run: ./build.sh -i weblog
-
- name: Build runner
uses: ./.github/actions/install_runner
-
- name: Build agent
id: build-agent
run: ./build.sh -i agent
-
- - name: Run
- run: ./run.sh TRACER_ESSENTIAL_SCENARIOS
-
- - name: Run Cross Tracer Propagation Tests
- # run if builds passed (allows these tests to still run in case the previous Run step failed)
- if: steps.build-agent.outcome == 'success'
- run: ./run.sh CROSSED_TRACING_LIBRARIES
-
+ - name: Run scenario ${{ matrix.scenario }}
+ run: ./run.sh ${{ matrix.scenario }}
- name: Compress artifact
if: ${{ always() }}
run: tar -czvf artifact.tar.gz $(ls | grep logs)
-
- name: Upload artifact
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
- name: logs_express-poc
+ name: logs_${{ matrix.weblog-variant }}-${{ matrix.scenario }}
path: artifact.tar.gz
parametric:
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 6f45e851b47..10bfe08f0a8 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -85,6 +85,35 @@ package-arm:
- ../.gitlab/build-deb-rpm.sh
- find . -iregex '.*\.\(deb\|rpm\)' -printf '%f\0' | xargs -0 dd-pkg lint
+package-oci:
+ extends: .package-oci
+ stage: package
+ script:
+ - ../.gitlab/build_oci_package.sh
+
+oci-internal-publish:
+ extends: .oci-internal-publish
+ stage: package
+ needs: [ package-oci ]
+ rules:
+ - when: on_success
+ variables:
+ FLAVOR: datadog-apm-library-js
+
+oci-internal-test-ecr-publish:
+ stage: package
+ needs: [ oci-internal-publish ]
+ rules:
+ - when: on_success
+ trigger:
+ project: DataDog/public-images
+ branch: main
+ strategy: depend
+ variables:
+ IMG_SOURCES: registry.ddbuild.io/ci/remote-updates/datadog-apm-library-js:pipeline-${CI_PIPELINE_ID}-1
+ IMG_DESTINATIONS: apm-library-js-package:pipeline-${CI_PIPELINE_ID}
+ IMG_REGISTRIES: agent-qa
+
.release-package:
stage: deploy
variables:
@@ -107,40 +136,39 @@ deploy_to_reliability_env:
UPSTREAM_PROJECT_NAME: $CI_PROJECT_NAME
UPSTREAM_COMMIT_SHA: $CI_COMMIT_SHA
-deploy_to_docker_registries:
+
+generate-lib-init-tag-values:
+ tags: ["arch:amd64"]
+ image: registry.ddbuild.io/ci/auto_inject/gitlab:current
stage: deploy
- needs: []
rules:
- - if: '$CI_COMMIT_TAG =~ /^v.*/ || $CI_COMMIT_TAG == "dev"'
+ # We don't tag prerelease versions
+ - if: '$CI_COMMIT_TAG =~ /^v[0-9]+\.[0-9]+\.[0-9]+$/'
when: on_success
- when: manual
allow_failure: true
- trigger:
- project: DataDog/public-images
- branch: main
- strategy: depend
variables:
- IMG_SOURCES: ghcr.io/datadog/dd-trace-js/dd-lib-js-init:$CI_COMMIT_TAG
- IMG_DESTINATIONS: dd-lib-js-init:$CI_COMMIT_TAG
- IMG_SIGNING: "false"
- RETRY_COUNT: 5
- RETRY_DELAY: 300
+ IMG_DESTINATION_BASE: dd-lib-js-init
+ script:
+ - ./.gitlab/build-lib-init.sh
+ artifacts:
+ reports:
+ dotenv: build.env
-deploy_latest_to_docker_registries:
+deploy-lib-init-trigger:
stage: deploy
- needs: []
- rules:
- - if: '$CI_COMMIT_TAG =~ /^v.*/'
- when: on_success
- - when: manual
- allow_failure: true
+  # needs the version from the generate-lib-init-tag-values job
+ needs:
+ - job: generate-lib-init-tag-values
+ artifacts: true
trigger:
+# project: DataDog/dd-trace-dotnet-gitlab-test # can be used for testing
project: DataDog/public-images
branch: main
strategy: depend
variables:
IMG_SOURCES: ghcr.io/datadog/dd-trace-js/dd-lib-js-init:$CI_COMMIT_TAG
- IMG_DESTINATIONS: dd-lib-js-init:latest
+ IMG_DESTINATIONS: $IMG_DESTINATIONS
IMG_SIGNING: "false"
RETRY_COUNT: 5
RETRY_DELAY: 300
diff --git a/.gitlab/build-lib-init.sh b/.gitlab/build-lib-init.sh
new file mode 100755
index 00000000000..6f47cfaaee1
--- /dev/null
+++ b/.gitlab/build-lib-init.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+set -e
+
+# Safety checks to make sure we have required values
+if [ -z "$CI_COMMIT_TAG" ]; then
+ echo "Error: CI_COMMIT_TAG was not provided"
+ exit 1
+fi
+
+if [ -z "$CI_COMMIT_SHA" ]; then
+ echo "Error: CI_COMMIT_SHA was not provided"
+ exit 1
+fi
+
+if [ -z "$IMG_DESTINATION_BASE" ]; then
+  echo "Error: IMG_DESTINATION_BASE was not provided. This should be set to the destination docker image, excluding the tag name, e.g. dd-lib-dotnet-init"
+ exit 1
+fi
+
+# If this is a pre-release version, we don't publish
+if echo "$CI_COMMIT_TAG" | grep -q "-" > /dev/null; then
+ echo "Error: This is a pre-release version, should not publish images: $CI_COMMIT_TAG"
+ exit 1
+fi
+
+# Calculate the tags we use for floating major and minor versions
+MAJOR_MINOR_VERSION="$(sed -nE 's/^(v[0-9]+\.[0-9]+)\.[0-9]+$/\1/p' <<< ${CI_COMMIT_TAG})"
+MAJOR_VERSION="$(sed -nE 's/^(v[0-9]+)\.[0-9]+\.[0-9]+$/\1/p' <<< ${CI_COMMIT_TAG})"
+
+# Make sure we have all the tags
+git fetch --tags
+
+# We need to determine whether this is the latest tag and whether it's the latest major or not
+# So we fetch all tags and sort them to find both the latest, and the latest in this major.
+# 'sort' technically gets prerelease versions in the wrong order here, but we explicitly
+# exclude them anyway, as they're ignored for the purposes of determining the 'latest' tags.
+LATEST_TAG="$(git tag | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | sort -V -r | head -n 1)"
+LATEST_MAJOR_TAG="$(git tag -l "$MAJOR_VERSION.*" | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | sort -V -r | head -n 1)"
+echo "This tag: $CI_COMMIT_TAG"
+echo "Latest repository tag: $LATEST_TAG"
+echo "Latest repository tag for this major: $LATEST_MAJOR_TAG"
+echo "---------"
+
+# GNU sort -C (silent) reports via exit code whether the data is already in sorted order
+# We use this to check whether the current tag is greater than (or equal to) the latest tag
+if printf '%s\n' "$LATEST_TAG" "$CI_COMMIT_TAG" | sort -C -V; then
+ # The current tag is the latest in the repository
+ IS_LATEST_TAG=1
+else
+ IS_LATEST_TAG=0
+fi
+
+if printf '%s\n' "$LATEST_MAJOR_TAG" "$CI_COMMIT_TAG" | sort -C -V; then
+ # The current tag is the latest for this major version in the repository
+ IS_LATEST_MAJOR_TAG=1
+else
+ IS_LATEST_MAJOR_TAG=0
+fi
+
+# print everything for debugging purposes
+echo "Calculated values:"
+echo "MAJOR_MINOR_VERSION=${MAJOR_MINOR_VERSION}"
+echo "MAJOR_VERSION=${MAJOR_VERSION}"
+echo "IS_LATEST_TAG=${IS_LATEST_TAG}"
+echo "IS_LATEST_MAJOR_TAG=${IS_LATEST_MAJOR_TAG}"
+echo "---------"
+
+# Final check that everything is ok
+# We should have a major_minor version
+if [ -z "$MAJOR_MINOR_VERSION" ]; then
+ echo "Error: Could not determine major_minor version for stable release, this should not happen"
+ exit 1
+fi
+
+# if this is a latest major tag, we should have a major version
+if [ "$IS_LATEST_MAJOR_TAG" -eq 1 ] && [ -z "$MAJOR_VERSION" ]; then
+ echo "Error: Could not determine major version for latest major release, this should not happen"
+ exit 1
+fi
+
+# Generate the final variables, and save them into build.env so they can be read by the trigger job
+set_image_tags() {
+ SUFFIX="$1"
+ VARIABLE_SUFFIX="${SUFFIX:+_$SUFFIX}" # add a '_' prefix
+ TAG_SUFFIX="${SUFFIX:+-$SUFFIX}" # add a '-' prefix
+
+ # We always add this tag, regardless of the version
+ DESTINATIONS="${IMG_DESTINATION_BASE}:${CI_COMMIT_TAG}${TAG_SUFFIX}"
+
+ # We always add the major_minor tag (we never release 2.5.2 _after_ 2.5.3, for example)
+ DESTINATIONS="${DESTINATIONS},${IMG_DESTINATION_BASE}:${MAJOR_MINOR_VERSION}${TAG_SUFFIX}"
+
+ # Only latest-major releases get the major tag
+ if [ "$IS_LATEST_MAJOR_TAG" -eq 1 ]; then
+ DESTINATIONS="${DESTINATIONS},${IMG_DESTINATION_BASE}:${MAJOR_VERSION}${TAG_SUFFIX}"
+ fi
+
+ # Only latest releases get the latest tag
+ if [ "$IS_LATEST_TAG" -eq 1 ]; then
+ DESTINATIONS="${DESTINATIONS},${IMG_DESTINATION_BASE}:latest${TAG_SUFFIX}"
+ fi
+
+ # Save the value to the build.env file
+ echo "IMG_DESTINATIONS${VARIABLE_SUFFIX}=${DESTINATIONS}"
+ echo "IMG_DESTINATIONS${VARIABLE_SUFFIX}=${DESTINATIONS}" >> build.env
+}
+
+# Calculate the non-suffixed tags
+set_image_tags
+
+# For each suffix, calculate the tags
+for ADDITIONAL_TAG_SUFFIX in ${ADDITIONAL_TAG_SUFFIXES//,/ }
+do
+ set_image_tags "$ADDITIONAL_TAG_SUFFIX"
+done
\ No newline at end of file
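For readers skimming the new script, a hedged JavaScript mirror of its two key steps (illustrative only, not part of this PR):

```js
// 1) `printf '%s\n' A B | sort -C -V` exits 0 when A <= B in version order;
//    an illustrative JS equivalent for three-part vX.Y.Z tags:
function versionLte (a, b) {
  const pa = a.slice(1).split('.').map(Number) // 'v5.14.1' -> [5, 14, 1]
  const pb = b.slice(1).split('.').map(Number)
  for (let i = 0; i < 3; i++) {
    if (pa[i] !== pb[i]) return pa[i] < pb[i]
  }
  return true
}

// 2) The tag fan-out performed by set_image_tags:
function imageTags (base, tag, isLatestTag, isLatestMajorTag, suffix = '') {
  const [major, minor] = tag.split('.') // 'v5.15.0' -> ['v5', '15', '0']
  const tagSuffix = suffix ? `-${suffix}` : ''
  const tags = [tag, `${major}.${minor}`] // always: exact tag and major.minor
  if (isLatestMajorTag) tags.push(major) // only the latest within its major
  if (isLatestTag) tags.push('latest') // only the overall latest
  return tags.map(t => `${base}:${t}${tagSuffix}`).join(',')
}

// imageTags('dd-lib-js-init', 'v5.15.0', versionLte('v5.14.1', 'v5.15.0'), true)
// => 'dd-lib-js-init:v5.15.0,dd-lib-js-init:v5.15,dd-lib-js-init:v5,dd-lib-js-init:latest'
```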
diff --git a/.gitlab/build_oci_package.sh b/.gitlab/build_oci_package.sh
new file mode 100755
index 00000000000..f2965ef54b3
--- /dev/null
+++ b/.gitlab/build_oci_package.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -e
+
+cd ..
+
+npm pack
+
+mkdir -p packaging/sources
+
+npm install --prefix ./packaging/sources/ dd-trace-*.tgz
+
+rm packaging/sources/*.json # package.json and package-lock.json are unneeded
+
+if [ -n "$CI_COMMIT_TAG" ] && [ -z "$JS_PACKAGE_VERSION" ]; then
+ JS_PACKAGE_VERSION=${CI_COMMIT_TAG##v}
+elif [ -z "$CI_COMMIT_TAG" ] && [ -z "$JS_PACKAGE_VERSION" ]; then
+ JS_PACKAGE_VERSION="$(jq --raw-output '.version' package.json).pipeline.${CI_PIPELINE_ID}.beta.${CI_COMMIT_SHORT_SHA}"
+fi
+echo -n $JS_PACKAGE_VERSION > packaging/auto_inject-node.version
+echo -n $JS_PACKAGE_VERSION > packaging/sources/version
+
+cd packaging
+
+datadog-package create \
+ --version="$JS_PACKAGE_VERSION" \
+ --package="datadog-apm-library-js" \
+ --archive=true \
+ --archive-path="datadog-apm-library-js-$JS_PACKAGE_VERSION.tar" \
+ ./sources
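The version selection above has three cases; a hedged JavaScript mirror of the same precedence (function and parameter names are illustrative):

```js
// Explicit JS_PACKAGE_VERSION wins, then a git tag with the leading 'v'
// stripped, then a synthetic per-pipeline beta version for snapshot builds.
function ociPackageVersion ({ jsPackageVersion, ciCommitTag, pkgVersion, pipelineId, shortSha }) {
  if (jsPackageVersion) return jsPackageVersion
  if (ciCommitTag) return ciCommitTag.replace(/^v/, '') // 'v5.15.0' -> '5.15.0'
  return `${pkgVersion}.pipeline.${pipelineId}.beta.${shortSha}`
}

// ociPackageVersion({ ciCommitTag: 'v5.15.0' }) => '5.15.0'
// ociPackageVersion({ pkgVersion: '5.15.0', pipelineId: 123, shortSha: 'abc1234' })
// => '5.15.0.pipeline.123.beta.abc1234'
```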
diff --git a/.gitlab/single-step-instrumentation-tests.yml b/.gitlab/single-step-instrumentation-tests.yml
index 7b886fae310..11e12a45f8e 100644
--- a/.gitlab/single-step-instrumentation-tests.yml
+++ b/.gitlab/single-step-instrumentation-tests.yml
@@ -37,6 +37,16 @@ onboarding_tests:
extends: .base_job_onboarding_tests
stage: single-step-instrumentation-tests
needs: [package-snapshot,package-snapshot-arm]
+ rules:
+ - if: $JS_PACKAGE_VERSION
+ when: never
+ - if: '$CI_COMMIT_TAG =~ /^v.*/'
+ when: never
+ - if: $CI_COMMIT_BRANCH == 'master'
+ when: on_success
+ allow_failure: true
+ - when: manual
+ allow_failure: true
allow_failure: false
variables:
TEST_LIBRARY: nodejs
diff --git a/.npmignore b/.npmignore
index f3a1b1b064f..6955a334148 100644
--- a/.npmignore
+++ b/.npmignore
@@ -12,6 +12,7 @@
!index.js
!esbuild.js
!init.js
+!initialize.mjs
!loader-hook.mjs
!register.js
!package.json
diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv
index 9565bc3f553..eca2504caaa 100644
--- a/LICENSE-3rdparty.csv
+++ b/LICENSE-3rdparty.csv
@@ -12,17 +12,14 @@ require,dc-polyfill,MIT,Copyright 2023 Datadog Inc.
require,ignore,MIT,Copyright 2013 Kael Zhang and contributors
require,import-in-the-middle,Apache license 2.0,Copyright 2021 Datadog Inc.
require,int64-buffer,MIT,Copyright 2015-2016 Yusuke Kawasaki
-require,ipaddr.js,MIT,Copyright 2011-2017 whitequark
require,istanbul-lib-coverage,BSD-3-Clause,Copyright 2012-2015 Yahoo! Inc.
require,jest-docblock,MIT,Copyright Meta Platforms, Inc. and affiliates.
require,koalas,MIT,Copyright 2013-2017 Brian Woodward
require,limiter,MIT,Copyright 2011 John Hurliman
require,lodash.sortby,MIT,Copyright JS Foundation and other contributors
require,lru-cache,ISC,Copyright (c) 2010-2022 Isaac Z. Schlueter and Contributors
-require,methods,MIT,Copyright 2013-2014 TJ Holowaychuk
require,module-details-from-path,MIT,Copyright 2016 Thomas Watson Steen
require,msgpack-lite,MIT,Copyright 2015 Yusuke Kawasaki
-require,node-abort-controller,MIT,Copyright (c) 2019 Steve Faulkner
require,opentracing,MIT,Copyright 2016 Resonance Labs Inc
require,path-to-regexp,MIT,Copyright 2014 Blake Embrey
require,pprof-format,MIT,Copyright 2022 Stephen Belanger
diff --git a/README.md b/README.md
index 349b0a51478..3a7224b8d44 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,6 @@
[](https://www.npmjs.com/package/dd-trace)
[](https://www.npmjs.com/package/dd-trace/v/latest-node16)
-[](https://www.npmjs.com/package/dd-trace/v/latest-node14)
[](https://codecov.io/gh/DataDog/dd-trace-js)
@@ -28,12 +27,12 @@ Most of the documentation for `dd-trace` is available on these webpages:
| :---: | :---: | :---: | :---: | :---: | :---: |
| [`v1`](https://github.com/DataDog/dd-trace-js/tree/v1.x) |  | `>= v12` | **End of Life** | 2021-07-13 | 2022-02-25 |
| [`v2`](https://github.com/DataDog/dd-trace-js/tree/v2.x) |  | `>= v12` | **End of Life** | 2022-01-28 | 2023-08-15 |
-| [`v3`](https://github.com/DataDog/dd-trace-js/tree/v3.x) |  | `>= v14` | **Maintenance** | 2022-08-15 | 2024-05-15 |
+| [`v3`](https://github.com/DataDog/dd-trace-js/tree/v3.x) |  | `>= v14` | **End of Life** | 2022-08-15 | 2024-05-15 |
| [`v4`](https://github.com/DataDog/dd-trace-js/tree/v4.x) |  | `>= v16` | **Maintenance** | 2023-05-12 | 2025-01-11 |
| [`v5`](https://github.com/DataDog/dd-trace-js/tree/v5.x) |  | `>= v18` | **Current** | 2024-01-11 | Unknown |
-We currently maintain three release lines, namely `v5`, `v4` and `v3`.
-Features and bug fixes that are merged are released to the `v5` line and, if appropriate, also the `v4` & `v3` line.
+We currently maintain two release lines, namely `v5` and `v4`.
+Features and bug fixes that are merged are released to the `v5` line and, if appropriate, also `v4`.
For any new projects it is recommended to use the `v5` release line:
@@ -42,7 +41,7 @@ $ npm install dd-trace
$ yarn add dd-trace
```
-However, existing projects that already use the `v4` & `v3` release line, or projects that need to support EOL versions of Node.js, may continue to use these release lines.
+However, existing projects that already use the `v4` release line, or projects that need to support EOL versions of Node.js, may continue to use the older release lines.
This is done by specifying the version when installing the package.
```sh
@@ -67,18 +66,9 @@ Changes associated with each individual release are documented on the [GitHub Re
Please read the [CONTRIBUTING.md](https://github.com/DataDog/dd-trace-js/blob/master/CONTRIBUTING.md) document before contributing to this open source project.
-## Experimental ESM Support
+## ECMAScript Modules (ESM) Support
-> **Warning**
->
-> ESM support has been temporarily disabled starting from Node 20 as significant
-> changes are in progress.
-
-ESM support is currently in the experimental stages, while CJS has been supported
-since inception. This means that code loaded using `require()` should work fine
-but code loaded using `import` might not always work.
-
-Use the following command to enable experimental ESM support with your application:
+ESM support requires an additional command-line argument. Use the following to enable experimental ESM support with your application:
Node.js < v20.6
@@ -110,9 +100,9 @@ If you would like to trace your bundled application then please read this page o
Please refer to the [SECURITY.md](https://github.com/DataDog/dd-trace-js/blob/master/SECURITY.md) document if you have found a security issue.
+
## Datadog With OpenTelemetry
-Please refer to the [Node.js Custom Instrumentation using OpenTelemetry API](https://docs.datadoghq.com/tracing/trace_collection/custom_instrumentation/nodejs/otel/) document. It includes information on how to use the OpenTelemetry API with dd-trace-js
+Please refer to the [Node.js Custom Instrumentation using OpenTelemetry API](https://docs.datadoghq.com/tracing/trace_collection/custom_instrumentation/nodejs/otel/) document. It includes information on how to use the OpenTelemetry API with dd-trace-js.
Note that our internal implementation of the OpenTelemetry API is currently set within the version range `>=1.0.0 <1.9.0`. This range will be updated at a regular cadence; therefore, we recommend updating your tracer to the latest release to ensure up-to-date support.
-
diff --git a/ci/init.js b/ci/init.js
index 3599b2e05f4..b54e29abd4d 100644
--- a/ci/init.js
+++ b/ci/init.js
@@ -4,6 +4,7 @@ const { isTrue } = require('../packages/dd-trace/src/util')
const isJestWorker = !!process.env.JEST_WORKER_ID
const isCucumberWorker = !!process.env.CUCUMBER_WORKER_ID
+const isMochaWorker = !!process.env.MOCHA_WORKER_ID
const options = {
startupLogs: false,
@@ -44,6 +45,12 @@ if (isCucumberWorker) {
}
}
+if (isMochaWorker) {
+ options.experimental = {
+ exporter: 'mocha_worker'
+ }
+}
+
if (shouldInit) {
tracer.init(options)
tracer.use('fs', false)
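The new `MOCHA_WORKER_ID` branch above mirrors the existing Jest and Cucumber ones: inside a test-framework worker process the tracer swaps in a worker exporter, so results are handed back to the coordinating process rather than reported independently by every worker. A hypothetical sketch of that shape (the real exporters live in the tracer internals and differ in detail):

```js
// Hypothetical worker exporter: forward finished payloads to the parent
// process over IPC instead of flushing them to the agent directly.
class WorkerExporter {
  export (payload) {
    if (process.send) {
      process.send({ type: 'dd-trace:worker-report', payload }) // invented message shape
    }
  }
}

module.exports = WorkerExporter
```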
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 00000000000..f35f562bf71
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,6 @@
+Note: If you're viewing this `docs/` directory on GitHub
+you'll find that none of the links work and that most of
+the content is missing. This directory simply contains
+base files used to generate docs for the API docs site:
+
+[API Documentation](https://datadoghq.dev/dd-trace-js/)
diff --git a/docs/test.ts b/docs/test.ts
index abdac7a7daf..91fafd48734 100644
--- a/docs/test.ts
+++ b/docs/test.ts
@@ -116,6 +116,9 @@ tracer.init({
apiSecurity: {
enabled: true,
requestSampling: 1.0
+ },
+ rasp: {
+ enabled: true
}
}
});
diff --git a/ext/exporters.d.ts b/ext/exporters.d.ts
index d2ebaefe267..07bc2cd29e3 100644
--- a/ext/exporters.d.ts
+++ b/ext/exporters.d.ts
@@ -5,6 +5,7 @@ declare const exporters: {
AGENT_PROXY: 'agent_proxy',
JEST_WORKER: 'jest_worker',
CUCUMBER_WORKER: 'cucumber_worker'
+ MOCHA_WORKER: 'mocha_worker'
}
export = exporters
diff --git a/ext/exporters.js b/ext/exporters.js
index b615d28f459..770116c3152 100644
--- a/ext/exporters.js
+++ b/ext/exporters.js
@@ -5,5 +5,6 @@ module.exports = {
DATADOG: 'datadog',
AGENT_PROXY: 'agent_proxy',
JEST_WORKER: 'jest_worker',
- CUCUMBER_WORKER: 'cucumber_worker'
+ CUCUMBER_WORKER: 'cucumber_worker',
+ MOCHA_WORKER: 'mocha_worker'
}
diff --git a/ext/tags.d.ts b/ext/tags.d.ts
index 0aafd03138f..1acf4f4f38e 100644
--- a/ext/tags.d.ts
+++ b/ext/tags.d.ts
@@ -10,6 +10,7 @@ declare const tags: {
MANUAL_DROP: 'manual.drop'
MEASURED: '_dd.measured'
BASE_SERVICE: '_dd.base_service'
+ DD_PARENT_ID: '_dd.parent_id'
HTTP_URL: 'http.url'
HTTP_METHOD: 'http.method'
HTTP_STATUS_CODE: 'http.status_code'
diff --git a/ext/tags.js b/ext/tags.js
index e270a6bde3a..c12aa1a57dc 100644
--- a/ext/tags.js
+++ b/ext/tags.js
@@ -13,6 +13,7 @@ const tags = {
MANUAL_DROP: 'manual.drop',
MEASURED: '_dd.measured',
BASE_SERVICE: '_dd.base_service',
+ DD_PARENT_ID: '_dd.parent_id',
// HTTP
HTTP_URL: 'http.url',
diff --git a/index.d.ts b/index.d.ts
index 6684011efd3..51d87993ab4 100644
--- a/index.d.ts
+++ b/index.d.ts
@@ -113,9 +113,15 @@ interface Tracer extends opentracing.Tracer {
wrap any> (name: string, options: (...args: any[]) => tracer.TraceOptions & tracer.SpanOptions, fn: T): T;
/**
-   * Create and return a string that can be included in the <head> of a
- * document to enable RUM tracing to include it. The resulting string
- * should not be cached.
+   * Returns an HTML string containing <meta> tags that should be included in
+   * the <head> of a document to enable correlating the current trace with the
+ * RUM view. Otherwise, it is not possible to associate the trace used to
+ * generate the initial HTML document with a given RUM view. The resulting
+ * HTML document should not be cached as the meta tags are time-sensitive
+ * and are associated with a specific user.
+ *
+ * Note that this feature is currently not supported by the backend and
+ * using it will have no effect.
*/
getRumData (): string;
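A hedged usage sketch for the reworded `getRumData` doc (an Express app is assumed purely for illustration; per the comment above, the backend does not currently support this feature, so the call has no effect in practice):

```js
const tracer = require('dd-trace').init()
const app = require('express')()

app.get('/', (req, res) => {
  // The returned <meta> tags are time-sensitive and user-specific,
  // so the rendered document must not be cached.
  res.set('Cache-Control', 'no-store')
  res.send(`<html><head>${tracer.getRumData()}</head><body>Hello</body></html>`)
})

app.listen(3000)
```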
@@ -684,6 +690,15 @@ declare namespace tracer {
* @default 0.1
*/
requestSampling?: number
+ },
+ /**
+ * Configuration for RASP
+ */
+ rasp?: {
+ /** Whether to enable RASP.
+ * @default false
+ */
+ enabled?: boolean
}
};
diff --git a/initialize.mjs b/initialize.mjs
new file mode 100644
index 00000000000..e7b33de492b
--- /dev/null
+++ b/initialize.mjs
@@ -0,0 +1,52 @@
+/**
+ * This file serves one of two purposes, depending on how it's used.
+ *
+ * If used with --import, it will import init.js and register the loader hook.
+ * If used with --loader, it will act as the loader hook, except that it will
+ * also import init.js inside the source code of the entrypoint file.
+ *
+ * The result is that no matter how this file is used, so long as it's with
+ * one of the two flags, the tracer will always be initialized, and the loader
+ * hook will always be active for ESM support.
+ */
+
+import { isMainThread } from 'worker_threads'
+
+import { fileURLToPath } from 'node:url'
+import {
+ load as origLoad,
+ resolve as origResolve,
+ getFormat as origGetFormat,
+ getSource as origGetSource
+} from 'import-in-the-middle/hook.mjs'
+
+let hasInsertedInit = false
+function insertInit (result) {
+ if (!hasInsertedInit) {
+ hasInsertedInit = true
+ result.source = `
+import '${fileURLToPath(new URL('./init.js', import.meta.url))}';
+${result.source}`
+ }
+ return result
+}
+
+export async function load (...args) {
+ return insertInit(await origLoad(...args))
+}
+
+export const resolve = origResolve
+
+export const getFormat = origGetFormat
+
+export async function getSource (...args) {
+ return insertInit(await origGetSource(...args))
+}
+
+if (isMainThread) {
+ await import('./init.js')
+ const { register } = await import('node:module')
+ if (register) {
+ register('./loader-hook.mjs', import.meta.url)
+ }
+}
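The `insertInit` trick above is what makes this file self-initializing under `--loader` (e.g. `NODE_OPTIONS='--loader dd-trace/initialize.mjs'`, as exercised in init.spec.js below): the hook prepends an import of `init.js` to the first module source it returns. A simplified sketch of that pattern using the current Node loader-hook signature (the real file delegates to `import-in-the-middle` and also re-exports the legacy `getFormat`/`getSource` hooks):

```js
// Simplified load hook: delegate to the next loader in the chain, then
// prepend the tracer bootstrap import to the first module source seen.
let injected = false

export async function load (url, context, nextLoad) {
  const result = await nextLoad(url, context)
  if (!injected && result.source != null) {
    injected = true
    result.source = `import 'dd-trace/init.js';\n${result.source}`
  }
  return result
}
```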
diff --git a/integration-tests/ci-visibility.spec.js b/integration-tests/ci-visibility.spec.js
index 8918e6e63c1..4368f761cd0 100644
--- a/integration-tests/ci-visibility.spec.js
+++ b/integration-tests/ci-visibility.spec.js
@@ -31,7 +31,11 @@ const {
TEST_EARLY_FLAKE_ENABLED,
TEST_NAME,
JEST_DISPLAY_NAME,
- TEST_EARLY_FLAKE_ABORT_REASON
+ TEST_EARLY_FLAKE_ABORT_REASON,
+ TEST_COMMAND,
+ TEST_MODULE,
+ MOCHA_IS_PARALLEL,
+ TEST_SOURCE_START
} = require('../packages/dd-trace/src/plugins/util/test')
const { ERROR_MESSAGE } = require('../packages/dd-trace/src/constants')
@@ -45,7 +49,7 @@ const mochaCommonOptions = {
const jestCommonOptions = {
name: 'jest',
- dependencies: ['jest', 'chai@v4', 'jest-jasmine2'],
+ dependencies: ['jest', 'chai@v4', 'jest-jasmine2', 'jest-environment-jsdom'],
expectedStdout: 'Test Suites: 2 passed',
expectedCoverageFiles: [
'ci-visibility/test/sum.js',
@@ -58,7 +62,7 @@ const testFrameworks = [
{
...mochaCommonOptions,
testFile: 'ci-visibility/run-mocha.js',
- dependencies: ['mocha', 'chai@v4', 'nyc', 'mocha-each'],
+ dependencies: ['mocha', 'chai@v4', 'nyc', 'mocha-each', 'workerpool'],
expectedCoverageFiles: [
'ci-visibility/run-mocha.js',
'ci-visibility/test/sum.js',
@@ -152,11 +156,49 @@ testFrameworks.forEach(({
})
}).timeout(50000)
- it('does not init CI Visibility when running in parallel mode', (done) => {
- receiver.assertPayloadReceived(() => {
- const error = new Error('it should not report tests')
- done(error)
- }, ({ url }) => url === '/api/v2/citestcycle', 3000).catch(() => {})
+ it('works with parallel mode', (done) => {
+ const eventsPromise = receiver
+ .gatherPayloadsMaxTimeout(({ url }) => url.endsWith('/api/v2/citestcycle'), (payloads) => {
+ const events = payloads.flatMap(({ payload }) => payload.events)
+ const sessionEventContent = events.find(event => event.type === 'test_session_end').content
+ const moduleEventContent = events.find(event => event.type === 'test_module_end').content
+ const suites = events.filter(event => event.type === 'test_suite_end').map(event => event.content)
+ const tests = events.filter(event => event.type === 'test').map(event => event.content)
+
+ assert.equal(sessionEventContent.meta[MOCHA_IS_PARALLEL], 'true')
+ assert.equal(
+ sessionEventContent.test_session_id.toString(10),
+ moduleEventContent.test_session_id.toString(10)
+ )
+ suites.forEach(({
+ meta,
+ test_suite_id: testSuiteId,
+ test_module_id: testModuleId,
+ test_session_id: testSessionId
+ }) => {
+ assert.exists(meta[TEST_COMMAND])
+ assert.exists(meta[TEST_MODULE])
+ assert.exists(testSuiteId)
+ assert.equal(testModuleId.toString(10), moduleEventContent.test_module_id.toString(10))
+ assert.equal(testSessionId.toString(10), moduleEventContent.test_session_id.toString(10))
+ })
+
+ tests.forEach(({
+ meta,
+ metrics,
+ test_suite_id: testSuiteId,
+ test_module_id: testModuleId,
+ test_session_id: testSessionId
+ }) => {
+ assert.exists(meta[TEST_COMMAND])
+ assert.exists(meta[TEST_MODULE])
+ assert.exists(testSuiteId)
+ assert.equal(testModuleId.toString(10), moduleEventContent.test_module_id.toString(10))
+ assert.equal(testSessionId.toString(10), moduleEventContent.test_session_id.toString(10))
+ assert.propertyVal(meta, MOCHA_IS_PARALLEL, 'true')
+ assert.exists(metrics[TEST_SOURCE_START])
+ })
+ })
childProcess = fork(testFile, {
cwd,
@@ -175,7 +217,65 @@ testFrameworks.forEach(({
testOutput += chunk.toString()
})
childProcess.on('message', () => {
- assert.include(testOutput, 'Unable to initialize CI Visibility because Mocha is running in parallel mode.')
+ eventsPromise.then(() => {
+ assert.notInclude(testOutput, 'TypeError')
+ assert.notInclude(
+ testOutput, 'Unable to initialize CI Visibility because Mocha is running in parallel mode.'
+ )
+ done()
+ }).catch(done)
+ })
+ })
+
+ it('works with parallel mode when run with the cli', (done) => {
+ const eventsPromise = receiver
+ .gatherPayloadsMaxTimeout(({ url }) => url.endsWith('/api/v2/citestcycle'), (payloads) => {
+ const events = payloads.flatMap(({ payload }) => payload.events)
+ const sessionEventContent = events.find(event => event.type === 'test_session_end').content
+ const suites = events.filter(event => event.type === 'test_suite_end').map(event => event.content)
+ const tests = events.filter(event => event.type === 'test').map(event => event.content)
+
+ assert.equal(sessionEventContent.meta[MOCHA_IS_PARALLEL], 'true')
+ assert.equal(suites.length, 2)
+ assert.equal(tests.length, 2)
+ })
+ childProcess = exec('mocha --parallel --jobs 2 ./ci-visibility/test/ci-visibility-test*', {
+ cwd,
+ env: getCiVisAgentlessConfig(receiver.port),
+ stdio: 'pipe'
+ })
+ childProcess.stdout.on('data', (chunk) => {
+ testOutput += chunk.toString()
+ })
+ childProcess.stderr.on('data', (chunk) => {
+ testOutput += chunk.toString()
+ })
+ childProcess.on('exit', () => {
+ eventsPromise.then(() => {
+ assert.notInclude(testOutput, 'TypeError')
+ assert.notInclude(
+ testOutput, 'Unable to initialize CI Visibility because Mocha is running in parallel mode.'
+ )
+ done()
+ }).catch(done)
+ })
+ })
+
+ it('does not blow up when workerpool is used outside of a test', (done) => {
+ childProcess = exec('node ./ci-visibility/run-workerpool.js', {
+ cwd,
+ env: getCiVisAgentlessConfig(receiver.port),
+ stdio: 'pipe'
+ })
+ childProcess.stdout.on('data', (chunk) => {
+ testOutput += chunk.toString()
+ })
+ childProcess.stderr.on('data', (chunk) => {
+ testOutput += chunk.toString()
+ })
+ childProcess.on('exit', (code) => {
+ assert.include(testOutput, 'result 7')
+ assert.equal(code, 0)
done()
})
})
@@ -328,6 +428,7 @@ testFrameworks.forEach(({
done()
}).catch(done)
})
+
it('reports tests when using agentless', (done) => {
childProcess = fork(testFile, {
cwd,
@@ -347,6 +448,7 @@ testFrameworks.forEach(({
done()
}).catch(done)
})
+
it('reports tests when using evp proxy', (done) => {
childProcess = fork(testFile, {
cwd,
@@ -683,6 +785,49 @@ testFrameworks.forEach(({
}).catch(done)
})
})
+ it('calculates executable lines even if there have been skipped suites', (done) => {
+ receiver.setSettings({
+ itr_enabled: true,
+ code_coverage: true,
+ tests_skipping: true
+ })
+
+ receiver.setSuitesToSkip([{
+ type: 'suite',
+ attributes: {
+ suite: 'ci-visibility/test-total-code-coverage/test-skipped.js'
+ }
+ }])
+
+ const eventsPromise = receiver
+ .gatherPayloadsMaxTimeout(({ url }) => url.endsWith('/api/v2/citestcycle'), (payloads) => {
+ const events = payloads.flatMap(({ payload }) => payload.events)
+ const testSession = events.find(event => event.type === 'test_session_end').content
+
+        // Before https://github.com/DataDog/dd-trace-js/pull/4336, this would've been 100%.
+        // The reason is that by skipping jest's `addUntestedFiles`, we would not see unexecuted lines.
+        // In this case, these would be from the `unused-dependency.js` file.
+ // It is 50% now because we only cover 1 out of 2 files (`used-dependency.js`).
+ assert.propertyVal(testSession.metrics, TEST_CODE_COVERAGE_LINES_PCT, 50)
+ })
+
+ childProcess = exec(
+ runTestsWithCoverageCommand, // Requirement: the user must've opted in to code coverage
+ {
+ cwd,
+ env: {
+ ...getCiVisAgentlessConfig(receiver.port),
+ TESTS_TO_RUN: 'ci-visibility/test-total-code-coverage/test-',
+ COLLECT_COVERAGE_FROM: '**/test-total-code-coverage/**'
+ },
+ stdio: 'inherit'
+ }
+ )
+
+ childProcess.on('exit', () => {
+ eventsPromise.then(done).catch(done)
+ })
+ })
}
const reportingOptions = ['agentless', 'evp proxy']
@@ -1414,6 +1559,88 @@ testFrameworks.forEach(({
eventsPromise.then(() => done()).catch(done)
})
})
+
+ it('works with jsdom', (done) => {
+ const envVars = reportingOption === 'agentless'
+ ? getCiVisAgentlessConfig(receiver.port)
+ : getCiVisEvpProxyConfig(receiver.port)
+ if (reportingOption === 'evp proxy') {
+ receiver.setInfoResponse({ endpoints: ['/evp_proxy/v4'] })
+ }
+ // Tests from ci-visibility/test/ci-visibility-test-2.js will be considered new
+ receiver.setKnownTests({
+ [name]: {
+ 'ci-visibility/test/ci-visibility-test.js': ['ci visibility can report tests']
+ }
+ })
+ const NUM_RETRIES_EFD = 3
+ receiver.setSettings({
+ itr_enabled: false,
+ code_coverage: false,
+ tests_skipping: false,
+ early_flake_detection: {
+ enabled: true,
+ slow_test_retries: {
+ '5s': NUM_RETRIES_EFD
+ },
+ faulty_session_threshold: 100
+ }
+ })
+
+ const eventsPromise = receiver
+ .gatherPayloadsMaxTimeout(({ url }) => url.endsWith('/api/v2/citestcycle'), (payloads) => {
+ const events = payloads.flatMap(({ payload }) => payload.events)
+
+ const tests = events.filter(event => event.type === 'test').map(event => event.content)
+
+ // no other tests are considered new
+ const oldTests = tests.filter(test =>
+ test.meta[TEST_SUITE] === 'ci-visibility/test/ci-visibility-test.js'
+ )
+ oldTests.forEach(test => {
+ assert.notProperty(test.meta, TEST_IS_NEW)
+ })
+ assert.equal(oldTests.length, 1)
+
+ const newTests = tests.filter(test =>
+ test.meta[TEST_SUITE] === 'ci-visibility/test/ci-visibility-test-2.js'
+ )
+ newTests.forEach(test => {
+ assert.propertyVal(test.meta, TEST_IS_NEW, 'true')
+ })
+ const retriedTests = newTests.filter(test => test.meta[TEST_IS_RETRY] === 'true')
+        // all but one have been retried
+ assert.equal(
+ newTests.length - 1,
+ retriedTests.length
+ )
+ assert.equal(retriedTests.length, NUM_RETRIES_EFD)
+ // Test name does not change
+ newTests.forEach(test => {
+ assert.equal(test.meta[TEST_NAME], 'ci visibility 2 can report tests 2')
+ })
+ })
+
+ childProcess = exec(
+ runTestsWithCoverageCommand,
+ {
+ cwd,
+ env: {
+ ...envVars,
+ TESTS_TO_RUN: 'test/ci-visibility-test',
+ ENABLE_JSDOM: true,
+ DD_TRACE_DEBUG: 1,
+ DD_TRACE_LOG_LEVEL: 'warn'
+ },
+ stdio: 'inherit'
+ }
+ )
+ childProcess.on('exit', () => {
+ eventsPromise.then(() => {
+ done()
+ }).catch(done)
+ })
+ })
}
})
})
@@ -1447,6 +1674,7 @@ testFrameworks.forEach(({
testSpans.forEach(testSpan => {
assert.equal(testSpan.meta[TEST_SOURCE_FILE].startsWith('ci-visibility/test/ci-visibility-test'), true)
+ assert.exists(testSpan.metrics[TEST_SOURCE_START])
})
done()
@@ -1559,6 +1787,7 @@ testFrameworks.forEach(({
}).catch(done)
})
})
+
it('does not init if DD_API_KEY is not set', (done) => {
receiver.assertMessageReceived(() => {
done(new Error('Should not create spans'))
@@ -1587,6 +1816,7 @@ testFrameworks.forEach(({
done()
})
})
+
it('can report git metadata', (done) => {
const searchCommitsRequestPromise = receiver.payloadReceived(
({ url }) => url === '/api/v2/git/repository/search_commits'
@@ -1618,6 +1848,7 @@ testFrameworks.forEach(({
stdio: 'pipe'
})
})
+
it('can report code coverage', (done) => {
let testOutput
const libraryConfigRequestPromise = receiver.payloadReceived(
@@ -1681,6 +1912,7 @@ testFrameworks.forEach(({
done()
})
})
+
it('does not report code coverage if disabled by the API', (done) => {
receiver.setSettings({
itr_enabled: false,
@@ -1717,6 +1949,7 @@ testFrameworks.forEach(({
}
)
})
+
it('can skip suites received by the intelligent test runner API and still reports code coverage', (done) => {
receiver.setSuitesToSkip([{
type: 'suite',
@@ -1778,6 +2011,7 @@ testFrameworks.forEach(({
}
)
})
+
it('marks the test session as skipped if every suite is skipped', (done) => {
receiver.setSuitesToSkip(
[
@@ -1816,6 +2050,7 @@ testFrameworks.forEach(({
}).catch(done)
})
})
+
it('does not skip tests if git metadata upload fails', (done) => {
receiver.setSuitesToSkip([{
type: 'suite',
@@ -1859,6 +2094,7 @@ testFrameworks.forEach(({
}
)
})
+
it('does not skip tests if test skipping is disabled by the API', (done) => {
receiver.setSettings({
itr_enabled: true,
@@ -1898,6 +2134,7 @@ testFrameworks.forEach(({
}
)
})
+
it('does not skip suites if suite is marked as unskippable', (done) => {
receiver.setSuitesToSkip([
{
@@ -1978,6 +2215,7 @@ testFrameworks.forEach(({
}).catch(done)
})
})
+
it('only sets forced to run if suite was going to be skipped by ITR', (done) => {
receiver.setSuitesToSkip([
{
@@ -2052,6 +2290,7 @@ testFrameworks.forEach(({
}).catch(done)
})
})
+
it('sets _dd.ci.itr.tests_skipped to false if the received suite is not skipped', (done) => {
receiver.setSuitesToSkip([{
type: 'suite',
@@ -2086,6 +2325,7 @@ testFrameworks.forEach(({
}).catch(done)
})
})
+
it('reports itr_correlation_id in test suites', (done) => {
const itrCorrelationId = '4321'
receiver.setItrCorrelationId(itrCorrelationId)
@@ -2154,6 +2394,7 @@ testFrameworks.forEach(({
})
})
})
+
it('reports errors in test sessions', (done) => {
const eventsPromise = receiver
.gatherPayloadsMaxTimeout(({ url }) => url.endsWith('/api/v2/citestcycle'), (payloads) => {
@@ -2188,6 +2429,7 @@ testFrameworks.forEach(({
}).catch(done)
})
})
+
it('can report git metadata', (done) => {
const infoRequestPromise = receiver.payloadReceived(({ url }) => url === '/info')
const searchCommitsRequestPromise = receiver.payloadReceived(
@@ -2227,6 +2469,7 @@ testFrameworks.forEach(({
stdio: 'pipe'
})
})
+
it('can report code coverage', (done) => {
let testOutput
const libraryConfigRequestPromise = receiver.payloadReceived(
@@ -2291,6 +2534,7 @@ testFrameworks.forEach(({
done()
})
})
+
it('does not report code coverage if disabled by the API', (done) => {
receiver.setSettings({
itr_enabled: false,
@@ -2321,6 +2565,7 @@ testFrameworks.forEach(({
}
)
})
+
it('can skip suites received by the intelligent test runner API and still reports code coverage', (done) => {
receiver.setSuitesToSkip([{
type: 'suite',
@@ -2376,6 +2621,7 @@ testFrameworks.forEach(({
}
)
})
+
it('marks the test session as skipped if every suite is skipped', (done) => {
receiver.setSuitesToSkip(
[
@@ -2414,44 +2660,7 @@ testFrameworks.forEach(({
}).catch(done)
})
})
- it('marks the test session as skipped if every suite is skipped', (done) => {
- receiver.setSuitesToSkip(
- [
- {
- type: 'suite',
- attributes: {
- suite: 'ci-visibility/test/ci-visibility-test.js'
- }
- },
- {
- type: 'suite',
- attributes: {
- suite: 'ci-visibility/test/ci-visibility-test-2.js'
- }
- }
- ]
- )
- const eventsPromise = receiver
- .gatherPayloadsMaxTimeout(({ url }) => url.endsWith('/api/v2/citestcycle'), (payloads) => {
- const events = payloads.flatMap(({ payload }) => payload.events)
- const testSession = events.find(event => event.type === 'test_session_end').content
- assert.propertyVal(testSession.meta, TEST_STATUS, 'skip')
- })
- childProcess = exec(
- runTestsWithCoverageCommand,
- {
- cwd,
- env: getCiVisEvpProxyConfig(receiver.port),
- stdio: 'inherit'
- }
- )
- childProcess.on('exit', () => {
- eventsPromise.then(() => {
- done()
- }).catch(done)
- })
- })
it('does not skip tests if git metadata upload fails', (done) => {
receiver.assertPayloadReceived(() => {
const error = new Error('should not request skippable')
@@ -2488,6 +2697,7 @@ testFrameworks.forEach(({
}
)
})
+
it('does not skip tests if test skipping is disabled by the API', (done) => {
receiver.assertPayloadReceived(() => {
const error = new Error('should not request skippable')
@@ -2528,6 +2738,7 @@ testFrameworks.forEach(({
}
)
})
+
it('sets _dd.ci.itr.tests_skipped to false if the received suite is not skipped', (done) => {
receiver.setSuitesToSkip([{
type: 'suite',
@@ -2562,6 +2773,7 @@ testFrameworks.forEach(({
}).catch(done)
})
})
+
it('reports itr_correlation_id in test suites', (done) => {
const itrCorrelationId = '4321'
receiver.setItrCorrelationId(itrCorrelationId)
diff --git a/integration-tests/ci-visibility/run-jest.js b/integration-tests/ci-visibility/run-jest.js
index c8a740ea331..a1f236be7a2 100644
--- a/integration-tests/ci-visibility/run-jest.js
+++ b/integration-tests/ci-visibility/run-jest.js
@@ -20,6 +20,14 @@ if (process.env.OLD_RUNNER) {
options.testRunner = 'jest-jasmine2'
}
+if (process.env.ENABLE_JSDOM) {
+ options.testEnvironment = 'jsdom'
+}
+
+if (process.env.COLLECT_COVERAGE_FROM) {
+ options.collectCoverageFrom = process.env.COLLECT_COVERAGE_FROM.split(',')
+}
+
jest.runCLI(
options,
options.projects
diff --git a/integration-tests/ci-visibility/run-jest.mjs b/integration-tests/ci-visibility/run-jest.mjs
index a35ddda382c..a9ecb24d0c6 100644
--- a/integration-tests/ci-visibility/run-jest.mjs
+++ b/integration-tests/ci-visibility/run-jest.mjs
@@ -22,6 +22,10 @@ if (process.env.OLD_RUNNER) {
options.testRunner = 'jest-jasmine2'
}
+if (process.env.ENABLE_JSDOM) {
+ options.testEnvironment = 'jsdom'
+}
+
jest.runCLI(
options,
options.projects
diff --git a/integration-tests/ci-visibility/run-workerpool.js b/integration-tests/ci-visibility/run-workerpool.js
new file mode 100644
index 00000000000..8a77c9e315b
--- /dev/null
+++ b/integration-tests/ci-visibility/run-workerpool.js
@@ -0,0 +1,23 @@
+// eslint-disable-next-line
+const workerpool = require('workerpool')
+const pool = workerpool.pool({ workerType: 'process' })
+
+function add (a, b) {
+ return a + b
+}
+
+pool
+ .exec(add, [3, 4])
+ .then((result) => {
+ // eslint-disable-next-line no-console
+ console.log('result', result) // outputs 7
+ return pool.terminate()
+ })
+ .catch(function (err) {
+ // eslint-disable-next-line no-console
+ console.error(err)
+ process.exit(1)
+ })
+ .then(() => {
+ process.exit(0)
+ })
diff --git a/integration-tests/ci-visibility/test-total-code-coverage/test-run.js b/integration-tests/ci-visibility/test-total-code-coverage/test-run.js
new file mode 100644
index 00000000000..2256f75f069
--- /dev/null
+++ b/integration-tests/ci-visibility/test-total-code-coverage/test-run.js
@@ -0,0 +1,8 @@
+const { expect } = require('chai')
+const sum = require('./used-dependency')
+
+describe('test-run', () => {
+ it('can report tests', () => {
+ expect(sum(1, 2)).to.equal(3)
+ })
+})
diff --git a/integration-tests/ci-visibility/test-total-code-coverage/test-skipped.js b/integration-tests/ci-visibility/test-total-code-coverage/test-skipped.js
new file mode 100644
index 00000000000..1410740bfa3
--- /dev/null
+++ b/integration-tests/ci-visibility/test-total-code-coverage/test-skipped.js
@@ -0,0 +1,8 @@
+const { expect } = require('chai')
+const sum = require('./unused-dependency')
+
+describe('test-skipped', () => {
+ it('can report tests', () => {
+ expect(sum(1, 2)).to.equal(3)
+ })
+})
diff --git a/integration-tests/ci-visibility/test-total-code-coverage/unused-dependency.js b/integration-tests/ci-visibility/test-total-code-coverage/unused-dependency.js
new file mode 100644
index 00000000000..2012896b44c
--- /dev/null
+++ b/integration-tests/ci-visibility/test-total-code-coverage/unused-dependency.js
@@ -0,0 +1,3 @@
+module.exports = function (a, b) {
+ return a + b
+}
diff --git a/integration-tests/ci-visibility/test-total-code-coverage/used-dependency.js b/integration-tests/ci-visibility/test-total-code-coverage/used-dependency.js
new file mode 100644
index 00000000000..2012896b44c
--- /dev/null
+++ b/integration-tests/ci-visibility/test-total-code-coverage/used-dependency.js
@@ -0,0 +1,3 @@
+module.exports = function (a, b) {
+ return a + b
+}
diff --git a/integration-tests/ci-visibility/test/selenium-no-framework.js b/integration-tests/ci-visibility/test/selenium-no-framework.js
new file mode 100644
index 00000000000..cca24586bfd
--- /dev/null
+++ b/integration-tests/ci-visibility/test/selenium-no-framework.js
@@ -0,0 +1,30 @@
+const { By, Builder } = require('selenium-webdriver')
+const chrome = require('selenium-webdriver/chrome')
+
+async function run () {
+ const options = new chrome.Options()
+ options.addArguments('--headless')
+ const build = new Builder().forBrowser('chrome').setChromeOptions(options)
+ const driver = await build.build()
+
+ await driver.get(process.env.WEB_APP_URL)
+
+ await driver.getTitle()
+
+ await driver.manage().setTimeouts({ implicit: 500 })
+
+ const helloWorld = await driver.findElement(By.className('hello-world'))
+
+ await helloWorld.getText()
+
+ return driver.quit()
+}
+
+run()
+ .then(() => {
+ process.exit(0)
+ }).catch((err) => {
+ // eslint-disable-next-line no-console
+ console.error(err)
+ process.exit(1)
+ })
diff --git a/integration-tests/helpers.js b/integration-tests/helpers.js
index 37838e774e4..b8972540e1f 100644
--- a/integration-tests/helpers.js
+++ b/integration-tests/helpers.js
@@ -183,12 +183,12 @@ function spawnProc (filename, options = {}, stdioHandler) {
stdioHandler(data)
}
// eslint-disable-next-line no-console
- console.log(data.toString())
+ if (!options.silent) console.log(data.toString())
})
proc.stderr.on('data', data => {
// eslint-disable-next-line no-console
- console.error(data.toString())
+ if (!options.silent) console.error(data.toString())
})
})
}
diff --git a/integration-tests/init.spec.js b/integration-tests/init.spec.js
index d093d1d53df..f90968fc8c6 100644
--- a/integration-tests/init.spec.js
+++ b/integration-tests/init.spec.js
@@ -7,51 +7,101 @@ const path = require('path')
const DD_INJECTION_ENABLED = 'tracing'
-describe('init.js', () => {
- let cwd, proc, sandbox
-
- async function runTest (cwd, env, expected) {
- return new Promise((resolve, reject) => {
- spawnProc(path.join(cwd, 'init/index.js'), { cwd, env }, data => {
- try {
- assert.strictEqual(data.toString(), expected)
- resolve()
- } catch (e) {
- reject(e)
- }
- }).then(subproc => {
- proc = subproc
- })
- })
- }
+let cwd, proc, sandbox
- before(async () => {
- sandbox = await createSandbox()
- cwd = sandbox.folder
- })
- afterEach(() => {
- proc && proc.kill()
- })
- after(() => {
- return sandbox.remove()
+async function runTest (cwd, file, env, expected) {
+ return new Promise((resolve, reject) => {
+ spawnProc(path.join(cwd, file), { cwd, env, silent: true }, data => {
+ try {
+ assert.strictEqual(data.toString(), expected)
+ resolve()
+ } catch (e) {
+ reject(e)
+ }
+ }).then(subproc => {
+ proc = subproc
+ })
})
+}
+function testInjectionScenarios (arg, filename, esmWorks = false) {
context('when dd-trace is not in the app dir', () => {
- const NODE_OPTIONS = `--require ${path.join(__dirname, '..', 'init.js')}`
+ const NODE_OPTIONS = `--no-warnings --${arg} ${path.join(__dirname, '..', filename)}`
it('should initialize the tracer, if no DD_INJECTION_ENABLED', () => {
- return runTest(cwd, { NODE_OPTIONS }, 'true\n')
+ return runTest(cwd, 'init/trace.js', { NODE_OPTIONS }, 'true\n')
})
it('should not initialize the tracer, if DD_INJECTION_ENABLED', () => {
- return runTest(cwd, { NODE_OPTIONS, DD_INJECTION_ENABLED }, 'false\n')
+ return runTest(cwd, 'init/trace.js', { NODE_OPTIONS, DD_INJECTION_ENABLED }, 'false\n')
+ })
+ it('should initialize instrumentation, if no DD_INJECTION_ENABLED', () => {
+ return runTest(cwd, 'init/instrument.js', { NODE_OPTIONS }, 'true\n')
+ })
+ it('should not initialize instrumentation, if DD_INJECTION_ENABLED', () => {
+ return runTest(cwd, 'init/instrument.js', { NODE_OPTIONS, DD_INJECTION_ENABLED }, 'false\n')
+ })
+ it(`should ${esmWorks ? '' : 'not '}initialize ESM instrumentation, if no DD_INJECTION_ENABLED`, () => {
+ return runTest(cwd, 'init/instrument.mjs', { NODE_OPTIONS }, `${esmWorks}\n`)
+ })
+ it('should not initialize ESM instrumentation, if DD_INJECTION_ENABLED', () => {
+ return runTest(cwd, 'init/instrument.mjs', { NODE_OPTIONS, DD_INJECTION_ENABLED }, 'false\n')
})
})
context('when dd-trace in the app dir', () => {
- const NODE_OPTIONS = '--require dd-trace/init.js'
+ const NODE_OPTIONS = `--no-warnings --${arg} dd-trace/${filename}`
it('should initialize the tracer, if no DD_INJECTION_ENABLED', () => {
- return runTest(cwd, { NODE_OPTIONS }, 'true\n')
+ return runTest(cwd, 'init/trace.js', { NODE_OPTIONS }, 'true\n')
})
it('should initialize the tracer, if DD_INJECTION_ENABLED', () => {
- return runTest(cwd, { NODE_OPTIONS, DD_INJECTION_ENABLED }, 'true\n')
+ return runTest(cwd, 'init/trace.js', { NODE_OPTIONS, DD_INJECTION_ENABLED }, 'true\n')
+ })
+ it('should initialize instrumentation, if no DD_INJECTION_ENABLED', () => {
+ return runTest(cwd, 'init/instrument.js', { NODE_OPTIONS }, 'true\n')
+ })
+ it('should initialize instrumentation, if DD_INJECTION_ENABLED', () => {
+ return runTest(cwd, 'init/instrument.js', { NODE_OPTIONS, DD_INJECTION_ENABLED }, 'true\n')
})
+ it(`should ${esmWorks ? '' : 'not '}initialize ESM instrumentation, if no DD_INJECTION_ENABLED`, () => {
+ return runTest(cwd, 'init/instrument.mjs', { NODE_OPTIONS }, `${esmWorks}\n`)
+ })
+ it(`should ${esmWorks ? '' : 'not '}initialize ESM instrumentation, if DD_INJECTION_ENABLED`, () => {
+ return runTest(cwd, 'init/instrument.mjs', { NODE_OPTIONS, DD_INJECTION_ENABLED }, `${esmWorks}\n`)
+ })
+ })
+}
+
+describe('init.js', () => {
+ before(async () => {
+ sandbox = await createSandbox()
+ cwd = sandbox.folder
})
+ afterEach(() => {
+ proc && proc.kill()
+ })
+ after(() => {
+ return sandbox.remove()
+ })
+
+ testInjectionScenarios('require', 'init.js', false)
+})
+
+describe('initialize.mjs', () => {
+ before(async () => {
+ sandbox = await createSandbox()
+ cwd = sandbox.folder
+ })
+ afterEach(() => {
+ proc && proc.kill()
+ })
+ after(() => {
+ return sandbox.remove()
+ })
+
+ context('as --loader', () => {
+ testInjectionScenarios('loader', 'initialize.mjs', true)
+ })
+ if (Number(process.versions.node.split('.')[0]) >= 18) {
+ context('as --import', () => {
+ testInjectionScenarios('import', 'initialize.mjs', true)
+ })
+ }
})
diff --git a/integration-tests/init/instrument.js b/integration-tests/init/instrument.js
new file mode 100644
index 00000000000..55e5d28f450
--- /dev/null
+++ b/integration-tests/init/instrument.js
@@ -0,0 +1,21 @@
+const http = require('http')
+const dc = require('dc-polyfill')
+
+let gotEvent = false
+dc.subscribe('apm:http:client:request:start', (event) => {
+ gotEvent = true
+})
+
+const server = http.createServer((req, res) => {
+ res.end('Hello World')
+}).listen(0, () => {
+ http.get(`http://localhost:${server.address().port}`, (res) => {
+ res.on('data', () => {})
+ res.on('end', () => {
+ server.close()
+ // eslint-disable-next-line no-console
+ console.log(gotEvent)
+ process.exit()
+ })
+ })
+})
diff --git a/integration-tests/init/instrument.mjs b/integration-tests/init/instrument.mjs
new file mode 100644
index 00000000000..bddaf6ef13a
--- /dev/null
+++ b/integration-tests/init/instrument.mjs
@@ -0,0 +1,21 @@
+import http from 'http'
+import dc from 'dc-polyfill'
+
+let gotEvent = false
+dc.subscribe('apm:http:client:request:start', (event) => {
+ gotEvent = true
+})
+
+const server = http.createServer((req, res) => {
+ res.end('Hello World')
+}).listen(0, () => {
+ http.get(`http://localhost:${server.address().port}`, (res) => {
+ res.on('data', () => {})
+ res.on('end', () => {
+ server.close()
+ // eslint-disable-next-line no-console
+ console.log(gotEvent)
+ process.exit()
+ })
+ })
+})
diff --git a/integration-tests/init/index.js b/integration-tests/init/trace.js
similarity index 100%
rename from integration-tests/init/index.js
rename to integration-tests/init/trace.js
diff --git a/integration-tests/profiler/profiler.spec.js b/integration-tests/profiler/profiler.spec.js
index 05488a3a482..0803e2e9ae4 100644
--- a/integration-tests/profiler/profiler.spec.js
+++ b/integration-tests/profiler/profiler.spec.js
@@ -21,20 +21,33 @@ if (process.platform !== 'win32') {
}
function checkProfiles (agent, proc, timeout,
- expectedProfileTypes = DEFAULT_PROFILE_TYPES, expectBadExit = false, multiplicity = 1) {
+ expectedProfileTypes = DEFAULT_PROFILE_TYPES, expectBadExit = false, multiplicity = 1
+) {
+ return Promise.all([
+ processExitPromise(proc, timeout, expectBadExit),
+ expectProfileMessagePromise(agent, timeout, expectedProfileTypes, multiplicity)
+ ])
+}
+
+function expectProfileMessagePromise (agent, timeout,
+ expectedProfileTypes = DEFAULT_PROFILE_TYPES, multiplicity = 1
+) {
const fileNames = expectedProfileTypes.map(type => `${type}.pprof`)
- const resultPromise = agent.assertMessageReceived(({ headers, payload, files }) => {
- assert.propertyVal(headers, 'host', `127.0.0.1:${agent.port}`)
- assert.propertyVal(files[0], 'originalname', 'event.json')
- const event = JSON.parse(files[0].buffer.toString())
- assert.propertyVal(event, 'family', 'node')
- assert.deepPropertyVal(event, 'attachments', fileNames)
- for (const [index, fileName] of fileNames.entries()) {
- assert.propertyVal(files[index + 1], 'originalname', fileName)
+  return agent.assertMessageReceived(({ headers, files }) => {
+ try {
+ assert.propertyVal(headers, 'host', `127.0.0.1:${agent.port}`)
+ assert.propertyVal(files[0], 'originalname', 'event.json')
+ const event = JSON.parse(files[0].buffer.toString())
+ assert.propertyVal(event, 'family', 'node')
+ assert.deepPropertyVal(event, 'attachments', fileNames)
+ for (const [index, fileName] of fileNames.entries()) {
+ assert.propertyVal(files[index + 1], 'originalname', fileName)
+ }
+ } catch (e) {
+ e.message += ` ${JSON.stringify({ headers, files })}`
+ throw e
}
}, timeout, multiplicity)
-
- return Promise.all([processExitPromise(proc, timeout, expectBadExit), resultPromise])
}
function processExitPromise (proc, timeout, expectBadExit = false) {
@@ -73,6 +86,18 @@ async function getLatestProfile (cwd, pattern) {
return { profile: Profile.decode(pprofUnzipped), encoded: pprofGzipped.toString('base64') }
}
+function expectTimeout (messagePromise, allowErrors = false) {
+ return messagePromise.then(
+ () => {
+ throw new Error('Received unexpected message')
+ }, (e) => {
+ if (e.message !== 'timeout' && (!allowErrors || !e.message.startsWith('timeout, additionally:'))) {
+ throw e
+ }
+ }
+ )
+}
+
async function gatherNetworkTimelineEvents (cwd, scriptFilePath, eventType, args) {
const procStart = BigInt(Date.now() * 1000000)
const proc = fork(path.join(cwd, scriptFilePath), args, {
@@ -142,6 +167,7 @@ describe('profiler', () => {
let sandbox
let cwd
let profilerTestFile
+ let ssiTestFile
let oomTestFile
let oomEnv
let oomExecArgv
@@ -151,6 +177,7 @@ describe('profiler', () => {
sandbox = await createSandbox()
cwd = sandbox.folder
profilerTestFile = path.join(cwd, 'profiler/index.js')
+ ssiTestFile = path.join(cwd, 'profiler/ssi.js')
oomTestFile = path.join(cwd, 'profiler/oom.js')
oomExecArgv = ['--max-old-space-size=50']
})
@@ -355,19 +382,10 @@ describe('profiler', () => {
DD_PROFILING_ENABLED: 1
}
})
- const checkTelemetry = agent.assertTelemetryReceived(({ headers, payload }) => {
- }, 1000, 'generate-metrics')
+ const checkTelemetry = agent.assertTelemetryReceived(_ => {}, 1000, 'generate-metrics')
// SSI telemetry is not supposed to have been emitted when DD_INJECTION_ENABLED is absent,
- // so throw if telemetry callback was invoked and do nothing if it timed out
- const checkNoTelemetry = checkTelemetry.then(
- () => {
- throw new Error('Received unexpected metrics')
- }, (e) => {
- if (e.message !== 'timeout') {
- throw e
- }
- })
- return Promise.all([checkProfiles(agent, proc, timeout), checkNoTelemetry])
+    // so expect the telemetry callback to time out
+ return Promise.all([checkProfiles(agent, proc, timeout), expectTimeout(checkTelemetry)])
})
it('records SSI telemetry on process exit', () => {
@@ -469,4 +487,79 @@ describe('profiler', () => {
})
}
})
+
+ context('SSI heuristics', () => {
+ beforeEach(async () => {
+ agent = await new FakeAgent().start()
+ })
+
+ afterEach(async () => {
+ proc.kill()
+ await agent.stop()
+ })
+
+ describe('does not trigger for', () => {
+ it('a short-lived app that creates no spans', () => {
+ return heuristicsDoesNotTriggerFor([], false, false)
+ })
+
+ it('a short-lived app that creates a span', () => {
+ return heuristicsDoesNotTriggerFor(['create-span'], true, false)
+ })
+
+ it('a long-lived app that creates no spans', () => {
+ return heuristicsDoesNotTriggerFor(['long-lived'], false, false)
+ })
+
+ it('a short-lived app that creates no spans with the auto env var', () => {
+ return heuristicsDoesNotTriggerFor([], false, true)
+ })
+
+ it('a short-lived app that creates a span with the auto env var', () => {
+ return heuristicsDoesNotTriggerFor(['create-span'], true, true)
+ })
+
+ it('a long-lived app that creates no spans with the auto env var', () => {
+ return heuristicsDoesNotTriggerFor(['long-lived'], false, true)
+ })
+ })
+
+ it('triggers for long-lived span-creating app', () => {
+ return heuristicsTrigger(false)
+ })
+
+ it('triggers for long-lived span-creating app with the auto env var', () => {
+ return heuristicsTrigger(true)
+ })
+ })
+
+ function forkSsi (args, whichEnv) {
+ const profilerEnablingEnv = whichEnv ? { DD_PROFILING_ENABLED: 'auto' } : { DD_INJECTION_ENABLED: 'profiler' }
+ return fork(ssiTestFile, args, {
+ cwd,
+ env: {
+ DD_TRACE_AGENT_PORT: agent.port,
+ DD_INTERNAL_PROFILING_LONG_LIVED_THRESHOLD: '1300',
+ ...profilerEnablingEnv
+ }
+ })
+ }
+
+ function heuristicsTrigger (whichEnv) {
+ return checkProfiles(agent,
+ forkSsi(['create-span', 'long-lived'], whichEnv),
+ timeout,
+ DEFAULT_PROFILE_TYPES,
+ false,
+ // Will receive 2 messages: first one is for the trace, second one is for the profile. We
+ // only need the assertions in checkProfiles to succeed for the one with the profile.
+ 2)
+ }
+
+ function heuristicsDoesNotTriggerFor (args, allowTraceMessage, whichEnv) {
+ return Promise.all([
+ processExitPromise(forkSsi(args, whichEnv), timeout, false),
+ expectTimeout(expectProfileMessagePromise(agent, 1500), allowTraceMessage)
+ ])
+ }
})
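Taken together, these cases pin down the heuristic: activation requires both a long-lived process and at least one created span, regardless of whether SSI was enabled via DD_INJECTION_ENABLED or DD_PROFILING_ENABLED=auto. Restated as an illustrative predicate (the function name is made up; the tests assert this truth table, not this code):

function heuristicsShouldTrigger ({ isLongLived, hasCreatedSpan }) {
  return isLongLived && hasCreatedSpan
}
console.log(heuristicsShouldTrigger({ isLongLived: true, hasCreatedSpan: true })) // true
console.log(heuristicsShouldTrigger({ isLongLived: true, hasCreatedSpan: false })) // false
console.log(heuristicsShouldTrigger({ isLongLived: false, hasCreatedSpan: true })) // false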
diff --git a/integration-tests/profiler/ssi.js b/integration-tests/profiler/ssi.js
new file mode 100644
index 00000000000..b184d64762b
--- /dev/null
+++ b/integration-tests/profiler/ssi.js
@@ -0,0 +1,24 @@
+'use strict'
+
+const DDTrace = require('dd-trace')
+
+const tracer = DDTrace.init()
+
+async function run () {
+ const tasks = []
+ // If launched with 'create-span', the app will create a span.
+ if (process.argv.includes('create-span')) {
+ tasks.push(tracer.trace('woo', _ => {
+ return new Promise(setImmediate)
+ }))
+ }
+ // If launched with 'long-lived', the app will remain alive long enough to
+ // be considered long-lived by profiler activation heuristics.
+ if (process.argv.includes('long-lived')) {
+ const longLivedThreshold = Number(process.env.DD_INTERNAL_PROFILING_LONG_LIVED_THRESHOLD)
+ tasks.push(new Promise(resolve => setTimeout(resolve, longLivedThreshold + 200)))
+ }
+ await Promise.all(tasks)
+}
+
+tracer.profilerStarted().then(run)
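To run the script by hand outside the spec harness, something like the following mirrors forkSsi above; the agent port is an assumption (any listening test agent will do):

const { fork } = require('child_process')

fork('integration-tests/profiler/ssi.js', ['create-span', 'long-lived'], {
  env: {
    DD_TRACE_AGENT_PORT: '8126', // assumption: a local test agent listens here
    DD_INTERNAL_PROFILING_LONG_LIVED_THRESHOLD: '1300',
    DD_PROFILING_ENABLED: 'auto'
  }
})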
diff --git a/integration-tests/selenium/selenium.spec.js b/integration-tests/selenium/selenium.spec.js
index 7cec0de791b..e7f2404a2db 100644
--- a/integration-tests/selenium/selenium.spec.js
+++ b/integration-tests/selenium/selenium.spec.js
@@ -113,5 +113,34 @@ versionRange.forEach(version => {
})
})
})
+
+ it('does not crash when used outside a known test framework', (done) => {
+ let testOutput = ''
+ childProcess = exec(
+ 'node ./ci-visibility/test/selenium-no-framework.js',
+ {
+ cwd,
+ env: {
+ ...getCiVisAgentlessConfig(receiver.port),
+ WEB_APP_URL: `http://localhost:${webAppPort}`,
+ TESTS_TO_RUN: '**/ci-visibility/test/selenium-test*'
+ },
+ stdio: 'pipe'
+ }
+ )
+
+ childProcess.on('exit', (code) => {
+ assert.equal(code, 0)
+ assert.notInclude(testOutput, 'InvalidArgumentError')
+ done()
+ })
+
+ childProcess.stdout.on('data', (chunk) => {
+ testOutput += chunk.toString()
+ })
+ childProcess.stderr.on('data', (chunk) => {
+ testOutput += chunk.toString()
+ })
+ })
})
})
diff --git a/package.json b/package.json
index 439f2383033..38883d905b4 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "dd-trace",
- "version": "5.14.1",
+ "version": "5.15.0",
"description": "Datadog APM tracing client for JavaScript",
"main": "index.js",
"typings": "index.d.ts",
@@ -32,13 +32,13 @@
"test:plugins:upstream": "node ./packages/dd-trace/test/plugins/suite.js",
"test:profiler": "tap \"packages/dd-trace/test/profiling/**/*.spec.js\"",
"test:profiler:ci": "npm run test:profiler -- --coverage --nyc-arg=--include=\"packages/dd-trace/src/profiling/**/*.js\"",
- "test:integration": "mocha --colors --timeout 30000 \"integration-tests/*.spec.js\"",
- "test:integration:cucumber": "mocha --colors --timeout 30000 \"integration-tests/cucumber/*.spec.js\"",
- "test:integration:cypress": "mocha --colors --timeout 30000 \"integration-tests/cypress/*.spec.js\"",
- "test:integration:playwright": "mocha --colors --timeout 30000 \"integration-tests/playwright/*.spec.js\"",
- "test:integration:selenium": "mocha --colors --timeout 30000 \"integration-tests/selenium/*.spec.js\"",
- "test:integration:profiler": "mocha --colors --timeout 90000 \"integration-tests/profiler/*.spec.js\"",
- "test:integration:serverless": "mocha --colors --timeout 30000 \"integration-tests/serverless/*.spec.js\"",
+ "test:integration": "mocha --colors --timeout 30000 -r \"packages/dd-trace/test/setup/core.js\" \"integration-tests/*.spec.js\"",
+ "test:integration:cucumber": "mocha --colors --timeout 30000 -r \"packages/dd-trace/test/setup/core.js\" \"integration-tests/cucumber/*.spec.js\"",
+ "test:integration:cypress": "mocha --colors --timeout 30000 -r \"packages/dd-trace/test/setup/core.js\" \"integration-tests/cypress/*.spec.js\"",
+ "test:integration:playwright": "mocha --colors --timeout 30000 -r \"packages/dd-trace/test/setup/core.js\" \"integration-tests/playwright/*.spec.js\"",
+ "test:integration:selenium": "mocha --colors --timeout 30000 -r \"packages/dd-trace/test/setup/core.js\" \"integration-tests/selenium/*.spec.js\"",
+ "test:integration:profiler": "mocha --colors --timeout 90000 -r \"packages/dd-trace/test/setup/core.js\" \"integration-tests/profiler/*.spec.js\"",
+ "test:integration:serverless": "mocha --colors --timeout 30000 -r \"packages/dd-trace/test/setup/core.js\" \"integration-tests/serverless/*.spec.js\"",
"test:integration:plugins": "mocha --colors --exit -r \"packages/dd-trace/test/setup/mocha.js\" \"packages/datadog-plugin-@($(echo $PLUGINS))/test/integration-test/**/*.spec.js\"",
"test:unit:plugins": "mocha --colors --exit -r \"packages/dd-trace/test/setup/mocha.js\" \"packages/datadog-instrumentations/test/@($(echo $PLUGINS)).spec.js\" \"packages/datadog-plugin-@($(echo $PLUGINS))/test/**/*.spec.js\" --exclude \"packages/datadog-plugin-@($(echo $PLUGINS))/test/integration-test/**/*.spec.js\"",
"test:shimmer": "mocha --colors 'packages/datadog-shimmer/test/**/*.spec.js'",
@@ -70,7 +70,7 @@
"node": ">=18"
},
"dependencies": {
- "@datadog/native-appsec": "7.1.1",
+ "@datadog/native-appsec": "8.0.1",
"@datadog/native-iast-rewriter": "2.3.1",
"@datadog/native-iast-taint-tracking": "2.1.0",
"@datadog/native-metrics": "^2.0.0",
@@ -83,17 +83,14 @@
"ignore": "^5.2.4",
"import-in-the-middle": "^1.7.4",
"int64-buffer": "^0.1.9",
- "ipaddr.js": "^2.1.0",
"istanbul-lib-coverage": "3.2.0",
"jest-docblock": "^29.7.0",
"koalas": "^1.0.2",
"limiter": "1.1.5",
"lodash.sortby": "^4.7.0",
"lru-cache": "^7.14.0",
- "methods": "^1.1.2",
"module-details-from-path": "^1.0.3",
"msgpack-lite": "^0.1.26",
- "node-abort-controller": "^3.1.1",
"opentracing": ">=0.12.1",
"path-to-regexp": "^0.1.2",
"pprof-format": "^2.1.0",
diff --git a/packages/datadog-instrumentations/src/amqplib.js b/packages/datadog-instrumentations/src/amqplib.js
index a5d07ebb4ab..f0650459a47 100644
--- a/packages/datadog-instrumentations/src/amqplib.js
+++ b/packages/datadog-instrumentations/src/amqplib.js
@@ -8,13 +8,16 @@ const {
const kebabCase = require('../../datadog-core/src/utils/src/kebabcase')
const shimmer = require('../../datadog-shimmer')
+const { NODE_MAJOR, NODE_MINOR } = require('../../../version')
+const MIN_VERSION = ((NODE_MAJOR > 22) || (NODE_MAJOR === 22 && NODE_MINOR >= 2)) ? '>=0.5.3' : '>=0.5.0'
+
const startCh = channel('apm:amqplib:command:start')
const finishCh = channel('apm:amqplib:command:finish')
const errorCh = channel('apm:amqplib:command:error')
let methods = {}
-addHook({ name: 'amqplib', file: 'lib/defs.js', versions: ['>=0.5'] }, defs => {
+addHook({ name: 'amqplib', file: 'lib/defs.js', versions: [MIN_VERSION] }, defs => {
methods = Object.keys(defs)
.filter(key => Number.isInteger(defs[key]))
.filter(key => isCamelCase(key))
@@ -22,7 +25,7 @@ addHook({ name: 'amqplib', file: 'lib/defs.js', versions: ['>=0.5'] }, defs => {
return defs
})
-addHook({ name: 'amqplib', file: 'lib/channel.js', versions: ['>=0.5'] }, channel => {
+addHook({ name: 'amqplib', file: 'lib/channel.js', versions: [MIN_VERSION] }, channel => {
shimmer.wrap(channel.Channel.prototype, 'sendImmediately', sendImmediately => function (method, fields) {
return instrument(sendImmediately, this, arguments, methods[method], fields)
})
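The gate above narrows the hooked amqplib range on newer runtimes. A standalone sketch of the same selection, deriving the Node version from process.versions instead of the tracer's version helper:

const [major, minor] = process.versions.node.split('.').map(Number)
const minVersion = (major > 22 || (major === 22 && minor >= 2)) ? '>=0.5.3' : '>=0.5.0'
console.log(`amqplib range hooked on Node ${major}.${minor}: ${minVersion}`)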
diff --git a/packages/datadog-instrumentations/src/apollo-server-core.js b/packages/datadog-instrumentations/src/apollo-server-core.js
index 52db88973d7..3f075ab6938 100644
--- a/packages/datadog-instrumentations/src/apollo-server-core.js
+++ b/packages/datadog-instrumentations/src/apollo-server-core.js
@@ -1,6 +1,5 @@
'use strict'
-const { AbortController } = require('node-abort-controller')
const { addHook } = require('./helpers/instrument')
const shimmer = require('../../datadog-shimmer')
const dc = require('dc-polyfill')
diff --git a/packages/datadog-instrumentations/src/apollo-server.js b/packages/datadog-instrumentations/src/apollo-server.js
index 1278949372f..c92c4901cd6 100644
--- a/packages/datadog-instrumentations/src/apollo-server.js
+++ b/packages/datadog-instrumentations/src/apollo-server.js
@@ -1,6 +1,5 @@
'use strict'
-const { AbortController } = require('node-abort-controller')
const dc = require('dc-polyfill')
const { addHook } = require('./helpers/instrument')
diff --git a/packages/datadog-instrumentations/src/body-parser.js b/packages/datadog-instrumentations/src/body-parser.js
index a73c377ba9a..3e3d7503231 100644
--- a/packages/datadog-instrumentations/src/body-parser.js
+++ b/packages/datadog-instrumentations/src/body-parser.js
@@ -1,6 +1,5 @@
'use strict'
-const { AbortController } = require('node-abort-controller') // AbortController is not available in node <15
const shimmer = require('../../datadog-shimmer')
const { channel, addHook } = require('./helpers/instrument')
diff --git a/packages/datadog-instrumentations/src/check_require_cache.js b/packages/datadog-instrumentations/src/check_require_cache.js
index b33ed59f18b..782cb56e5b2 100644
--- a/packages/datadog-instrumentations/src/check_require_cache.js
+++ b/packages/datadog-instrumentations/src/check_require_cache.js
@@ -1,6 +1,30 @@
'use strict'
-/* eslint-disable no-console */
+// This code runs before the tracer is configured and before a logger is ready
+// For that reason we queue up the messages now and decide what to do with them later
+const warnings = []
+
+/**
+ * Here we maintain a list of packages that an application
+ * may have installed which could potentially conflict with
+ * dd-trace.
+ */
+const potentialConflicts = new Set([
+ '@appsignal/javascript',
+ '@appsignal/nodejs',
+ '@dynatrace/oneagent',
+ '@instana/aws-fargate',
+ '@instana/aws-lambda',
+ '@instana/azure-container-services',
+ '@instana/collector',
+ '@instana/google-cloud-run',
+ '@sentry/node',
+ 'appoptics-apm',
+ 'atatus-nodejs',
+ 'elastic-apm-node',
+ 'newrelic',
+ 'stackify-node-apm',
+ 'sqreen'
+])
const extractPackageAndModulePath = require('./utils/src/extract-package-and-module-path')
@@ -13,14 +37,14 @@ const extractPackageAndModulePath = require('./utils/src/extract-package-and-mod
*
* Note that this is only going to work for modules within npm
* packages, like `express`, and not internal modules, like
- * `http`.
+ * `http`. It also only works with CJS, not with ESM imports.
*
* The output isn't necessarily 100% perfect. For example if the
* app loads a package we instrument but at an unsupported
* version, a warning would still be displayed.
* This is OK as the tracer should be loaded earlier anyway.
*/
-module.exports = function () {
+module.exports.checkForRequiredModules = function () {
const packages = require('../../datadog-instrumentations/src/helpers/hooks')
const naughties = new Set()
let didWarn = false
@@ -31,11 +55,49 @@ module.exports = function () {
if (naughties.has(pkg)) continue
if (!(pkg in packages)) continue
- console.error(`Warning: Package '${pkg}' was loaded before dd-trace! This may break instrumentation.`)
+ warnings.push(`Warning: Package '${pkg}' was loaded before dd-trace! This may break instrumentation.`)
naughties.add(pkg)
didWarn = true
}
- if (didWarn) console.error('Warning: Please ensure dd-trace is loaded before other modules.')
+ if (didWarn) warnings.push('Warning: Please ensure dd-trace is loaded before other modules.')
+}
+
+/**
+ * APM tools, and some other packages in the community, work
+ * by monkey-patching internal modules and possibly some
+ * globals. Usually this is done in a conflict-free way by
+ * wrapping an existing method with a new method that still
+ * calls the original method. Unfortunately it's possible
+ * that some of these packages (dd-trace included) may
+ * wrap methods in a way that makes it unsafe for the methods
+ * to be wrapped again by another library.
+ *
+ * When debug mode is on, a warning is printed for each such
+ * package that is discovered. This can help
+ * when debugging a faulty installation.
+ */
+module.exports.checkForPotentialConflicts = function () {
+ const naughties = new Set()
+ let didWarn = false
+
+ for (const pathToModule of Object.keys(require.cache)) {
+ const { pkg } = extractPackageAndModulePath(pathToModule)
+ if (naughties.has(pkg)) continue
+ if (!potentialConflicts.has(pkg)) continue
+
+ warnings.push(`Warning: Package '${pkg}' may cause conflicts with dd-trace.`)
+
+ naughties.add(pkg)
+ didWarn = true
+ }
+
+ if (didWarn) warnings.push('Warning: Packages were loaded that may conflict with dd-trace.')
+}
+
+module.exports.flushStartupLogs = function (log) {
+ while (warnings.length) {
+ log.warn(warnings.shift())
+ }
}
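The net effect is a three-phase startup: queue warnings before any logger exists, then drain them once one is configured. A sketch of the calling sequence, with a console-backed logger standing in for the tracer's real one:

const checks = require('./check_require_cache') // path relative to this file
checks.checkForRequiredModules()
checks.checkForPotentialConflicts()
// later, once the tracer's logger is ready:
checks.flushStartupLogs({ warn: message => console.warn(message) })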
diff --git a/packages/datadog-instrumentations/src/cookie-parser.js b/packages/datadog-instrumentations/src/cookie-parser.js
index 94a30818e23..7a13682e5b0 100644
--- a/packages/datadog-instrumentations/src/cookie-parser.js
+++ b/packages/datadog-instrumentations/src/cookie-parser.js
@@ -1,6 +1,5 @@
'use strict'
-const { AbortController } = require('node-abort-controller') // AbortController is not available in node <15
const shimmer = require('../../datadog-shimmer')
const { channel, addHook } = require('./helpers/instrument')
diff --git a/packages/datadog-instrumentations/src/express.js b/packages/datadog-instrumentations/src/express.js
index 4916bd92f1c..ea507b4e3b0 100644
--- a/packages/datadog-instrumentations/src/express.js
+++ b/packages/datadog-instrumentations/src/express.js
@@ -3,7 +3,6 @@
const { createWrapRouterMethod } = require('./router')
const shimmer = require('../../datadog-shimmer')
const { addHook, channel } = require('./helpers/instrument')
-const { AbortController } = require('node-abort-controller')
const handleChannel = channel('apm:express:request:handle')
diff --git a/packages/datadog-instrumentations/src/graphql.js b/packages/datadog-instrumentations/src/graphql.js
index 11e917a30a9..c776c4f4fa5 100644
--- a/packages/datadog-instrumentations/src/graphql.js
+++ b/packages/datadog-instrumentations/src/graphql.js
@@ -1,7 +1,5 @@
'use strict'
-const { AbortController } = require('node-abort-controller')
-
const {
addHook,
channel,
diff --git a/packages/datadog-instrumentations/src/helpers/hooks.js b/packages/datadog-instrumentations/src/helpers/hooks.js
index 930ac2aed6c..34654182ddd 100644
--- a/packages/datadog-instrumentations/src/helpers/hooks.js
+++ b/packages/datadog-instrumentations/src/helpers/hooks.js
@@ -71,6 +71,7 @@ module.exports = {
'microgateway-core': () => require('../microgateway-core'),
mocha: () => require('../mocha'),
'mocha-each': () => require('../mocha'),
+ workerpool: () => require('../mocha'),
moleculer: () => require('../moleculer'),
mongodb: () => require('../mongodb'),
'mongodb-core': () => require('../mongodb-core'),
diff --git a/packages/datadog-instrumentations/src/helpers/register.js b/packages/datadog-instrumentations/src/helpers/register.js
index c4a2a3f7dde..eba90d6a980 100644
--- a/packages/datadog-instrumentations/src/helpers/register.js
+++ b/packages/datadog-instrumentations/src/helpers/register.js
@@ -29,10 +29,13 @@ if (!disabledInstrumentations.has('fetch')) {
}
const HOOK_SYMBOL = Symbol('hookExportsMap')
-// TODO: make this more efficient
-if (DD_TRACE_DEBUG && DD_TRACE_DEBUG.toLowerCase() !== 'false') checkRequireCache()
+if (DD_TRACE_DEBUG && DD_TRACE_DEBUG.toLowerCase() !== 'false') {
+ checkRequireCache.checkForRequiredModules()
+ setImmediate(checkRequireCache.checkForPotentialConflicts)
+}
+// TODO: make this more efficient
for (const packageName of names) {
if (disabledInstrumentations.has(packageName)) continue
diff --git a/packages/datadog-instrumentations/src/http/server.js b/packages/datadog-instrumentations/src/http/server.js
index 680e6b8dcbf..14bccd88994 100644
--- a/packages/datadog-instrumentations/src/http/server.js
+++ b/packages/datadog-instrumentations/src/http/server.js
@@ -1,6 +1,5 @@
'use strict'
-const { AbortController } = require('node-abort-controller') // AbortController is not available in node <15
const {
channel,
addHook
diff --git a/packages/datadog-instrumentations/src/jest.js b/packages/datadog-instrumentations/src/jest.js
index bbf778ba275..5ad67565c7e 100644
--- a/packages/datadog-instrumentations/src/jest.js
+++ b/packages/datadog-instrumentations/src/jest.js
@@ -148,7 +148,7 @@ function getWrappedEnvironment (BaseEnvironment, jestVersion) {
}
let hasSnapshotTests = true
try {
- const { _snapshotData } = this.context.expect.getState().snapshotState
+ const { _snapshotData } = this.getVmContext().expect.getState().snapshotState
hasSnapshotTests = Object.keys(_snapshotData).length > 0
} catch (e) {
// if we can't be sure, we'll err on the side of caution and assume it has snapshots
@@ -589,10 +589,13 @@ function coverageReporterWrapper (coverageReporter) {
/**
* If ITR is active, we're running fewer tests, so of course the total code coverage is reduced.
- * This calculation adds no value, so we'll skip it.
+ * This calculation adds no value, so we'll skip it unless the user has manually opted in to code coverage,
+ * in which case we'll leave it.
*/
shimmer.wrap(CoverageReporter.prototype, '_addUntestedFiles', addUntestedFiles => async function () {
- if (isSuitesSkippingEnabled) {
+ // If the user has added coverage manually, they're willing to pay the price of this execution, so
+ // we will not skip it.
+ if (isSuitesSkippingEnabled && !isUserCodeCoverageEnabled) {
return Promise.resolve()
}
return addUntestedFiles.apply(this, arguments)
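In isolation, the new guard reads as a two-flag predicate (flag names taken from the hunk; the wiring is illustrative):

const shouldSkipUntestedFiles = (isSuitesSkippingEnabled, isUserCodeCoverageEnabled) =>
  isSuitesSkippingEnabled && !isUserCodeCoverageEnabled

console.log(shouldSkipUntestedFiles(true, false)) // true: ITR is skipping and the user never asked for coverage
console.log(shouldSkipUntestedFiles(true, true)) // false: the user opted in, so compute it anyway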
diff --git a/packages/datadog-instrumentations/src/mocha.js b/packages/datadog-instrumentations/src/mocha.js
index 6e26b61c145..1c6998a7afc 100644
--- a/packages/datadog-instrumentations/src/mocha.js
+++ b/packages/datadog-instrumentations/src/mocha.js
@@ -1,674 +1,5 @@
-const { createCoverageMap } = require('istanbul-lib-coverage')
-
-const { isMarkedAsUnskippable } = require('../../datadog-plugin-jest/src/util')
-
-const { addHook, channel, AsyncResource } = require('./helpers/instrument')
-const shimmer = require('../../datadog-shimmer')
-const log = require('../../dd-trace/src/log')
-const {
- getCoveredFilenamesFromCoverage,
- resetCoverage,
- mergeCoverage,
- getTestSuitePath,
- fromCoverageMapToCoverage,
- getCallSites,
- addEfdStringToTestName,
- removeEfdStringFromTestName
-} = require('../../dd-trace/src/plugins/util/test')
-
-const testStartCh = channel('ci:mocha:test:start')
-const errorCh = channel('ci:mocha:test:error')
-const skipCh = channel('ci:mocha:test:skip')
-const testFinishCh = channel('ci:mocha:test:finish')
-const parameterizedTestCh = channel('ci:mocha:test:parameterize')
-
-const libraryConfigurationCh = channel('ci:mocha:library-configuration')
-const knownTestsCh = channel('ci:mocha:known-tests')
-const skippableSuitesCh = channel('ci:mocha:test-suite:skippable')
-
-const testSessionStartCh = channel('ci:mocha:session:start')
-const testSessionFinishCh = channel('ci:mocha:session:finish')
-
-const testSuiteStartCh = channel('ci:mocha:test-suite:start')
-const testSuiteFinishCh = channel('ci:mocha:test-suite:finish')
-const testSuiteErrorCh = channel('ci:mocha:test-suite:error')
-const testSuiteCodeCoverageCh = channel('ci:mocha:test-suite:code-coverage')
-
-const itrSkippedSuitesCh = channel('ci:mocha:itr:skipped-suites')
-
-// TODO: remove when root hooks and fixtures are implemented
-const patched = new WeakSet()
-
-const testToAr = new WeakMap()
-const originalFns = new WeakMap()
-const testFileToSuiteAr = new Map()
-const testToStartLine = new WeakMap()
-const newTests = {}
-
-// `isWorker` is true if it's a Mocha worker
-let isWorker = false
-
-// We'll preserve the original coverage here
-const originalCoverageMap = createCoverageMap()
-
-let suitesToSkip = []
-let frameworkVersion
-let isSuitesSkipped = false
-let skippedSuites = []
-const unskippableSuites = []
-let isForcedToRun = false
-let itrCorrelationId = ''
-let isEarlyFlakeDetectionEnabled = false
-let earlyFlakeDetectionNumRetries = 0
-let isSuitesSkippingEnabled = false
-let knownTests = []
-
-function getSuitesByTestFile (root) {
- const suitesByTestFile = {}
- function getSuites (suite) {
- if (suite.file) {
- if (suitesByTestFile[suite.file]) {
- suitesByTestFile[suite.file].push(suite)
- } else {
- suitesByTestFile[suite.file] = [suite]
- }
- }
- suite.suites.forEach(suite => {
- getSuites(suite)
- })
- }
- getSuites(root)
-
- const numSuitesByTestFile = Object.keys(suitesByTestFile).reduce((acc, testFile) => {
- acc[testFile] = suitesByTestFile[testFile].length
- return acc
- }, {})
-
- return { suitesByTestFile, numSuitesByTestFile }
+if (process.env.MOCHA_WORKER_ID) {
+ require('./mocha/worker')
+} else {
+ require('./mocha/main')
}
-
-function getTestStatus (test) {
- if (test.isPending()) {
- return 'skip'
- }
- if (test.isFailed() || test.timedOut) {
- return 'fail'
- }
- return 'pass'
-}
-
-function isRetry (test) {
- return test._currentRetry !== undefined && test._currentRetry !== 0
-}
-
-function getTestFullName (test) {
- return `mocha.${getTestSuitePath(test.file, process.cwd())}.${removeEfdStringFromTestName(test.fullTitle())}`
-}
-
-function isNewTest (test) {
- const testSuite = getTestSuitePath(test.file, process.cwd())
- const testName = removeEfdStringFromTestName(test.fullTitle())
- const testsForSuite = knownTests.mocha?.[testSuite] || []
- return !testsForSuite.includes(testName)
-}
-
-function retryTest (test) {
- const originalTestName = test.title
- const suite = test.parent
- for (let retryIndex = 0; retryIndex < earlyFlakeDetectionNumRetries; retryIndex++) {
- const clonedTest = test.clone()
- clonedTest.title = addEfdStringToTestName(originalTestName, retryIndex + 1)
- suite.addTest(clonedTest)
- clonedTest._ddIsNew = true
- clonedTest._ddIsEfdRetry = true
- }
-}
-
-function getTestAsyncResource (test) {
- if (!test.fn) {
- return testToAr.get(test)
- }
- if (!test.fn.asyncResource) {
- return testToAr.get(test.fn)
- }
- const originalFn = originalFns.get(test.fn)
- return testToAr.get(originalFn)
-}
-
-function getFilteredSuites (originalSuites) {
- return originalSuites.reduce((acc, suite) => {
- const testPath = getTestSuitePath(suite.file, process.cwd())
- const shouldSkip = suitesToSkip.includes(testPath)
- const isUnskippable = unskippableSuites.includes(suite.file)
- if (shouldSkip && !isUnskippable) {
- acc.skippedSuites.add(testPath)
- } else {
- acc.suitesToRun.push(suite)
- }
- return acc
- }, { suitesToRun: [], skippedSuites: new Set() })
-}
-
-function mochaHook (Runner) {
- if (patched.has(Runner)) return Runner
-
- patched.add(Runner)
-
- shimmer.wrap(Runner.prototype, 'runTests', runTests => function (suite, fn) {
- if (isEarlyFlakeDetectionEnabled) {
- // by the time we reach `this.on('test')`, it is too late. We need to add retries here
- suite.tests.forEach(test => {
- if (!test.isPending() && isNewTest(test)) {
- test._ddIsNew = true
- retryTest(test)
- }
- })
- }
- return runTests.apply(this, arguments)
- })
-
- shimmer.wrap(Runner.prototype, 'run', run => function () {
- if (!testStartCh.hasSubscribers || isWorker) {
- return run.apply(this, arguments)
- }
-
- const { suitesByTestFile, numSuitesByTestFile } = getSuitesByTestFile(this.suite)
-
- const testRunAsyncResource = new AsyncResource('bound-anonymous-fn')
-
- this.once('end', testRunAsyncResource.bind(function () {
- let status = 'pass'
- let error
- if (this.stats) {
- status = this.stats.failures === 0 ? 'pass' : 'fail'
- if (this.stats.tests === 0) {
- status = 'skip'
- }
- } else if (this.failures !== 0) {
- status = 'fail'
- }
-
- if (isEarlyFlakeDetectionEnabled) {
- /**
- * If Early Flake Detection (EFD) is enabled the logic is as follows:
- * - If all attempts for a test are failing, the test has failed and we will let the test process fail.
- * - If just a single attempt passes, we will prevent the test process from failing.
- * The rationale behind is the following: you may still be able to block your CI pipeline by gating
- * on flakiness (the test will be considered flaky), but you may choose to unblock the pipeline too.
- */
- for (const tests of Object.values(newTests)) {
- const failingNewTests = tests.filter(test => test.isFailed())
- const areAllNewTestsFailing = failingNewTests.length === tests.length
- if (failingNewTests.length && !areAllNewTestsFailing) {
- this.stats.failures -= failingNewTests.length
- this.failures -= failingNewTests.length
- }
- }
- }
-
- if (status === 'fail') {
- error = new Error(`Failed tests: ${this.failures}.`)
- }
-
- testFileToSuiteAr.clear()
-
- let testCodeCoverageLinesTotal
- if (global.__coverage__) {
- try {
- testCodeCoverageLinesTotal = originalCoverageMap.getCoverageSummary().lines.pct
- } catch (e) {
- // ignore errors
- }
- // restore the original coverage
- global.__coverage__ = fromCoverageMapToCoverage(originalCoverageMap)
- }
-
- testSessionFinishCh.publish({
- status,
- isSuitesSkipped,
- testCodeCoverageLinesTotal,
- numSkippedSuites: skippedSuites.length,
- hasForcedToRunSuites: isForcedToRun,
- hasUnskippableSuites: !!unskippableSuites.length,
- error,
- isEarlyFlakeDetectionEnabled
- })
- }))
-
- this.once('start', testRunAsyncResource.bind(function () {
- const processArgv = process.argv.slice(2).join(' ')
- const command = `mocha ${processArgv}`
- testSessionStartCh.publish({ command, frameworkVersion })
- if (skippedSuites.length) {
- itrSkippedSuitesCh.publish({ skippedSuites, frameworkVersion })
- }
- }))
-
- this.on('suite', function (suite) {
- if (suite.root || !suite.tests.length) {
- return
- }
- let asyncResource = testFileToSuiteAr.get(suite.file)
- if (!asyncResource) {
- asyncResource = new AsyncResource('bound-anonymous-fn')
- testFileToSuiteAr.set(suite.file, asyncResource)
- const isUnskippable = unskippableSuites.includes(suite.file)
- isForcedToRun = isUnskippable && suitesToSkip.includes(getTestSuitePath(suite.file, process.cwd()))
- asyncResource.runInAsyncScope(() => {
- testSuiteStartCh.publish({
- testSuite: suite.file,
- isUnskippable,
- isForcedToRun,
- itrCorrelationId
- })
- })
- }
- })
-
- this.on('suite end', function (suite) {
- if (suite.root) {
- return
- }
- const suitesInTestFile = suitesByTestFile[suite.file]
-
- const isLastSuite = --numSuitesByTestFile[suite.file] === 0
- if (!isLastSuite) {
- return
- }
-
- let status = 'pass'
- if (suitesInTestFile.every(suite => suite.pending)) {
- status = 'skip'
- } else {
- // has to check every test in the test file
- suitesInTestFile.forEach(suite => {
- suite.eachTest(test => {
- if (test.state === 'failed' || test.timedOut) {
- status = 'fail'
- }
- })
- })
- }
-
- if (global.__coverage__) {
- const coverageFiles = getCoveredFilenamesFromCoverage(global.__coverage__)
-
- testSuiteCodeCoverageCh.publish({
- coverageFiles,
- suiteFile: suite.file
- })
- // We need to reset coverage to get a code coverage per suite
- // Before that, we preserve the original coverage
- mergeCoverage(global.__coverage__, originalCoverageMap)
- resetCoverage(global.__coverage__)
- }
-
- const asyncResource = testFileToSuiteAr.get(suite.file)
- asyncResource.runInAsyncScope(() => {
- testSuiteFinishCh.publish(status)
- })
- })
-
- this.on('test', (test) => {
- if (isRetry(test)) {
- return
- }
- const testStartLine = testToStartLine.get(test)
- const asyncResource = new AsyncResource('bound-anonymous-fn')
- testToAr.set(test.fn, asyncResource)
-
- const {
- file: testSuiteAbsolutePath,
- title,
- _ddIsNew: isNew,
- _ddIsEfdRetry: isEfdRetry
- } = test
-
- const testInfo = {
- testName: test.fullTitle(),
- testSuiteAbsolutePath,
- title,
- isNew,
- isEfdRetry,
- testStartLine
- }
-
- // We want to store the result of the new tests
- if (isNew) {
- const testFullName = getTestFullName(test)
- if (newTests[testFullName]) {
- newTests[testFullName].push(test)
- } else {
- newTests[testFullName] = [test]
- }
- }
-
- asyncResource.runInAsyncScope(() => {
- testStartCh.publish(testInfo)
- })
- })
-
- this.on('test end', (test) => {
- const asyncResource = getTestAsyncResource(test)
- const status = getTestStatus(test)
-
- // if there are afterEach to be run, we don't finish the test yet
- if (asyncResource && !test.parent._afterEach.length) {
- asyncResource.runInAsyncScope(() => {
- testFinishCh.publish(status)
- })
- }
- })
-
- // If the hook passes, 'hook end' will be emitted. Otherwise, 'fail' will be emitted
- this.on('hook end', (hook) => {
- const test = hook.ctx.currentTest
- if (test && hook.parent._afterEach.includes(hook)) { // only if it's an afterEach
- const isLastAfterEach = hook.parent._afterEach.indexOf(hook) === hook.parent._afterEach.length - 1
- if (isLastAfterEach) {
- const status = getTestStatus(test)
- const asyncResource = getTestAsyncResource(test)
- asyncResource.runInAsyncScope(() => {
- testFinishCh.publish(status)
- })
- }
- }
- })
-
- this.on('fail', (testOrHook, err) => {
- const testFile = testOrHook.file
- let test = testOrHook
- const isHook = testOrHook.type === 'hook'
- if (isHook && testOrHook.ctx) {
- test = testOrHook.ctx.currentTest
- }
- let testAsyncResource
- if (test) {
- testAsyncResource = getTestAsyncResource(test)
- }
- if (testAsyncResource) {
- testAsyncResource.runInAsyncScope(() => {
- if (isHook) {
- err.message = `${testOrHook.fullTitle()}: ${err.message}`
- errorCh.publish(err)
- // if it's a hook and it has failed, 'test end' will not be called
- testFinishCh.publish('fail')
- } else {
- errorCh.publish(err)
- }
- })
- }
- const testSuiteAsyncResource = testFileToSuiteAr.get(testFile)
-
- if (testSuiteAsyncResource) {
- // we propagate the error to the suite
- const testSuiteError = new Error(
- `"${testOrHook.parent.fullTitle()}" failed with message "${err.message}"`
- )
- testSuiteError.stack = err.stack
- testSuiteAsyncResource.runInAsyncScope(() => {
- testSuiteErrorCh.publish(testSuiteError)
- })
- }
- })
-
- this.on('pending', (test) => {
- const testStartLine = testToStartLine.get(test)
- const {
- file: testSuiteAbsolutePath,
- title
- } = test
-
- const testInfo = {
- testName: test.fullTitle(),
- testSuiteAbsolutePath,
- title,
- testStartLine
- }
-
- const asyncResource = getTestAsyncResource(test)
- if (asyncResource) {
- asyncResource.runInAsyncScope(() => {
- skipCh.publish(testInfo)
- })
- } else {
- // if there is no async resource, the test has been skipped through `test.skip`
- // or the parent suite is skipped
- const skippedTestAsyncResource = new AsyncResource('bound-anonymous-fn')
- if (test.fn) {
- testToAr.set(test.fn, skippedTestAsyncResource)
- } else {
- testToAr.set(test, skippedTestAsyncResource)
- }
- skippedTestAsyncResource.runInAsyncScope(() => {
- skipCh.publish(testInfo)
- })
- }
- })
-
- return run.apply(this, arguments)
- })
-
- return Runner
-}
-
-function mochaEachHook (mochaEach) {
- if (patched.has(mochaEach)) return mochaEach
-
- patched.add(mochaEach)
-
- return shimmer.wrap(mochaEach, function () {
- const [params] = arguments
- const { it, ...rest } = mochaEach.apply(this, arguments)
- return {
- it: function (title) {
- parameterizedTestCh.publish({ title, params })
- it.apply(this, arguments)
- },
- ...rest
- }
- })
-}
-
-addHook({
- name: 'mocha',
- versions: ['>=5.2.0'],
- file: 'lib/mocha.js'
-}, (Mocha, mochaVersion) => {
- frameworkVersion = mochaVersion
- const mochaRunAsyncResource = new AsyncResource('bound-anonymous-fn')
- /**
- * Get ITR configuration and skippable suites
- * If ITR is disabled, `onDone` is called immediately on the subscriber
- */
- shimmer.wrap(Mocha.prototype, 'run', run => function () {
- if (this.options.parallel) {
- log.warn('Unable to initialize CI Visibility because Mocha is running in parallel mode.')
- return run.apply(this, arguments)
- }
-
- if (!libraryConfigurationCh.hasSubscribers || this.isWorker) {
- if (this.isWorker) {
- isWorker = true
- }
- return run.apply(this, arguments)
- }
- this.options.delay = true
-
- const runner = run.apply(this, arguments)
-
- this.files.forEach(path => {
- const isUnskippable = isMarkedAsUnskippable({ path })
- if (isUnskippable) {
- unskippableSuites.push(path)
- }
- })
-
- const onReceivedSkippableSuites = ({ err, skippableSuites, itrCorrelationId: responseItrCorrelationId }) => {
- if (err) {
- suitesToSkip = []
- } else {
- suitesToSkip = skippableSuites
- itrCorrelationId = responseItrCorrelationId
- }
- // We remove the suites that we skip through ITR
- const filteredSuites = getFilteredSuites(runner.suite.suites)
- const { suitesToRun } = filteredSuites
-
- isSuitesSkipped = suitesToRun.length !== runner.suite.suites.length
-
- log.debug(
- () => `${suitesToRun.length} out of ${runner.suite.suites.length} suites are going to run.`
- )
-
- runner.suite.suites = suitesToRun
-
- skippedSuites = Array.from(filteredSuites.skippedSuites)
-
- global.run()
- }
-
- const onReceivedKnownTests = ({ err, knownTests: receivedKnownTests }) => {
- if (err) {
- knownTests = []
- isEarlyFlakeDetectionEnabled = false
- } else {
- knownTests = receivedKnownTests
- }
-
- if (isSuitesSkippingEnabled) {
- skippableSuitesCh.publish({
- onDone: mochaRunAsyncResource.bind(onReceivedSkippableSuites)
- })
- } else {
- global.run()
- }
- }
-
- const onReceivedConfiguration = ({ err, libraryConfig }) => {
- if (err || !skippableSuitesCh.hasSubscribers || !knownTestsCh.hasSubscribers) {
- return global.run()
- }
-
- isEarlyFlakeDetectionEnabled = libraryConfig.isEarlyFlakeDetectionEnabled
- isSuitesSkippingEnabled = libraryConfig.isSuitesSkippingEnabled
- earlyFlakeDetectionNumRetries = libraryConfig.earlyFlakeDetectionNumRetries
-
- if (isEarlyFlakeDetectionEnabled) {
- knownTestsCh.publish({
- onDone: mochaRunAsyncResource.bind(onReceivedKnownTests)
- })
- } else if (isSuitesSkippingEnabled) {
- skippableSuitesCh.publish({
- onDone: mochaRunAsyncResource.bind(onReceivedSkippableSuites)
- })
- } else {
- global.run()
- }
- }
-
- mochaRunAsyncResource.runInAsyncScope(() => {
- libraryConfigurationCh.publish({
- onDone: mochaRunAsyncResource.bind(onReceivedConfiguration)
- })
- })
- return runner
- })
- return Mocha
-})
-
-addHook({
- name: 'mocha',
- versions: ['>=5.2.0'],
- file: 'lib/suite.js'
-}, (Suite) => {
- shimmer.wrap(Suite.prototype, 'addTest', addTest => function (test) {
- const callSites = getCallSites()
- let startLine
- const testCallSite = callSites.find(site => site.getFileName() === test.file)
- if (testCallSite) {
- startLine = testCallSite.getLineNumber()
- testToStartLine.set(test, startLine)
- }
- return addTest.apply(this, arguments)
- })
- return Suite
-})
-
-addHook({
- name: 'mocha',
- versions: ['>=5.2.0'],
- file: 'lib/runner.js'
-}, mochaHook)
-
-addHook({
- name: 'mocha',
- versions: ['>=5.2.0'],
- file: 'lib/cli/run-helpers.js'
-}, (run) => {
- shimmer.wrap(run, 'runMocha', runMocha => async function () {
- if (!testStartCh.hasSubscribers) {
- return runMocha.apply(this, arguments)
- }
- const mocha = arguments[0]
- /**
- * This attaches `run` to the global context, which we'll call after
- * our configuration and skippable suites requests
- */
- if (!mocha.options.parallel) {
- mocha.options.delay = true
- }
- return runMocha.apply(this, arguments)
- })
- return run
-})
-
-addHook({
- name: 'mocha',
- versions: ['>=5.2.0'],
- file: 'lib/runnable.js'
-}, (Runnable) => {
- shimmer.wrap(Runnable.prototype, 'run', run => function () {
- if (!testStartCh.hasSubscribers) {
- return run.apply(this, arguments)
- }
- const isBeforeEach = this.parent._beforeEach.includes(this)
- const isAfterEach = this.parent._afterEach.includes(this)
-
- const isTestHook = isBeforeEach || isAfterEach
-
- // we restore the original user defined function
- if (this.fn.asyncResource) {
- const originalFn = originalFns.get(this.fn)
- this.fn = originalFn
- }
-
- if (isTestHook || this.type === 'test') {
- const test = isTestHook ? this.ctx.currentTest : this
- const asyncResource = getTestAsyncResource(test)
-
- if (asyncResource) {
- // we bind the test fn to the correct async resource
- const newFn = asyncResource.bind(this.fn)
-
- // we store the original function, not to lose it
- originalFns.set(newFn, this.fn)
- this.fn = newFn
-
- // Temporarily keep functionality when .asyncResource is removed from node
- // in https://github.com/nodejs/node/pull/46432
- if (!this.fn.asyncResource) {
- this.fn.asyncResource = asyncResource
- }
- }
- }
-
- return run.apply(this, arguments)
- })
- return Runnable
-})
-
-addHook({
- name: 'mocha-each',
- versions: ['>=2.0.1']
-}, mochaEachHook)
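The monolithic hook file is now a thin dispatcher: the same require resolves to worker-side or main-process instrumentation depending on MOCHA_WORKER_ID, which is expected to be present only in mocha's parallel worker processes. A toy model of the dispatch:

// Same shape as the new mocha.js entry point above:
if (process.env.MOCHA_WORKER_ID) {
  console.log(`worker ${process.env.MOCHA_WORKER_ID}: would load ./mocha/worker`)
} else {
  console.log('main process: would load ./mocha/main')
}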
diff --git a/packages/datadog-instrumentations/src/mocha/common.js b/packages/datadog-instrumentations/src/mocha/common.js
new file mode 100644
index 00000000000..11168c55ce2
--- /dev/null
+++ b/packages/datadog-instrumentations/src/mocha/common.js
@@ -0,0 +1,48 @@
+const { addHook, channel } = require('../helpers/instrument')
+const shimmer = require('../../../datadog-shimmer')
+const { getCallSites } = require('../../../dd-trace/src/plugins/util/test')
+const { testToStartLine } = require('./utils')
+
+const parameterizedTestCh = channel('ci:mocha:test:parameterize')
+const patched = new WeakSet()
+
+// mocha-each support
+addHook({
+ name: 'mocha-each',
+ versions: ['>=2.0.1']
+}, mochaEach => {
+ if (patched.has(mochaEach)) return mochaEach
+
+ patched.add(mochaEach)
+
+ return shimmer.wrap(mochaEach, function () {
+ const [params] = arguments
+ const { it, ...rest } = mochaEach.apply(this, arguments)
+ return {
+ it: function (title) {
+ parameterizedTestCh.publish({ title, params })
+ it.apply(this, arguments)
+ },
+ ...rest
+ }
+ })
+})
+
+// support for start line
+addHook({
+ name: 'mocha',
+ versions: ['>=5.2.0'],
+ file: 'lib/suite.js'
+}, (Suite) => {
+ shimmer.wrap(Suite.prototype, 'addTest', addTest => function (test) {
+ const callSites = getCallSites()
+ let startLine
+ const testCallSite = callSites.find(site => site.getFileName() === test.file)
+ if (testCallSite) {
+ startLine = testCallSite.getLineNumber()
+ testToStartLine.set(test, startLine)
+ }
+ return addTest.apply(this, arguments)
+ })
+ return Suite
+})
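For context, this is the standard mocha-each usage the wrapper intercepts; each generated it publishes { title, params } on the parameterize channel before delegating to the original. Run it under the mocha CLI:

const forEach = require('mocha-each')
const assert = require('assert')

forEach([
  [1, 1, 2],
  [2, 3, 5]
]).it('adds %d and %d', (a, b, expected) => {
  assert.strictEqual(a + b, expected)
})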
diff --git a/packages/datadog-instrumentations/src/mocha/main.js b/packages/datadog-instrumentations/src/mocha/main.js
new file mode 100644
index 00000000000..fbf8ca88a9b
--- /dev/null
+++ b/packages/datadog-instrumentations/src/mocha/main.js
@@ -0,0 +1,487 @@
+'use strict'
+
+const { createCoverageMap } = require('istanbul-lib-coverage')
+const { addHook, channel, AsyncResource } = require('../helpers/instrument')
+const shimmer = require('../../../datadog-shimmer')
+const { isMarkedAsUnskippable } = require('../../../datadog-plugin-jest/src/util')
+const log = require('../../../dd-trace/src/log')
+const {
+ getTestSuitePath,
+ MOCHA_WORKER_TRACE_PAYLOAD_CODE,
+ fromCoverageMapToCoverage,
+ getCoveredFilenamesFromCoverage,
+ mergeCoverage,
+ resetCoverage
+} = require('../../../dd-trace/src/plugins/util/test')
+
+const {
+ isNewTest,
+ retryTest,
+ getSuitesByTestFile,
+ runnableWrapper,
+ getOnTestHandler,
+ getOnTestEndHandler,
+ getOnHookEndHandler,
+ getOnFailHandler,
+ getOnPendingHandler,
+ testFileToSuiteAr
+} = require('./utils')
+require('./common')
+
+const testSessionAsyncResource = new AsyncResource('bound-anonymous-fn')
+const patched = new WeakSet()
+const newTests = {}
+let suitesToSkip = []
+const unskippableSuites = []
+let isSuitesSkipped = false
+let skippedSuites = []
+let isEarlyFlakeDetectionEnabled = false
+let isSuitesSkippingEnabled = false
+let earlyFlakeDetectionNumRetries = 0
+let knownTests = []
+let itrCorrelationId = ''
+let isForcedToRun = false
+
+// We'll preserve the original coverage here
+const originalCoverageMap = createCoverageMap()
+
+// test channels
+const testStartCh = channel('ci:mocha:test:start')
+
+// test suite channels
+const testSuiteStartCh = channel('ci:mocha:test-suite:start')
+const testSuiteFinishCh = channel('ci:mocha:test-suite:finish')
+const testSuiteErrorCh = channel('ci:mocha:test-suite:error')
+const testSuiteCodeCoverageCh = channel('ci:mocha:test-suite:code-coverage')
+
+// session channels
+const libraryConfigurationCh = channel('ci:mocha:library-configuration')
+const knownTestsCh = channel('ci:mocha:known-tests')
+const skippableSuitesCh = channel('ci:mocha:test-suite:skippable')
+const workerReportTraceCh = channel('ci:mocha:worker-report:trace')
+const testSessionStartCh = channel('ci:mocha:session:start')
+const testSessionFinishCh = channel('ci:mocha:session:finish')
+const itrSkippedSuitesCh = channel('ci:mocha:itr:skipped-suites')
+
+function getFilteredSuites (originalSuites) {
+ return originalSuites.reduce((acc, suite) => {
+ const testPath = getTestSuitePath(suite.file, process.cwd())
+ const shouldSkip = suitesToSkip.includes(testPath)
+ const isUnskippable = unskippableSuites.includes(suite.file)
+ if (shouldSkip && !isUnskippable) {
+ acc.skippedSuites.add(testPath)
+ } else {
+ acc.suitesToRun.push(suite)
+ }
+ return acc
+ }, { suitesToRun: [], skippedSuites: new Set() })
+}
+
+function getOnStartHandler (isParallel, frameworkVersion) {
+ return testSessionAsyncResource.bind(function () {
+ const processArgv = process.argv.slice(2).join(' ')
+ const command = `mocha ${processArgv}`
+ testSessionStartCh.publish({ command, frameworkVersion })
+ if (!isParallel && skippedSuites.length) {
+ itrSkippedSuitesCh.publish({ skippedSuites, frameworkVersion })
+ }
+ })
+}
+
+function getOnEndHandler (isParallel) {
+ return testSessionAsyncResource.bind(function () {
+ let status = 'pass'
+ let error
+ if (this.stats) {
+ status = this.stats.failures === 0 ? 'pass' : 'fail'
+ if (this.stats.tests === 0) {
+ status = 'skip'
+ }
+ } else if (this.failures !== 0) {
+ status = 'fail'
+ }
+
+ if (!isParallel && isEarlyFlakeDetectionEnabled) {
+ /**
+ * If Early Flake Detection (EFD) is enabled the logic is as follows:
+ * - If all attempts for a test are failing, the test has failed and we will let the test process fail.
+ * - If just a single attempt passes, we will prevent the test process from failing.
+ * The rationale behind this is the following: you may still be able to block your CI pipeline by gating
+ * on flakiness (the test will be considered flaky), but you may choose to unblock the pipeline too.
+ */
+ for (const tests of Object.values(newTests)) {
+ const failingNewTests = tests.filter(test => test.isFailed())
+ const areAllNewTestsFailing = failingNewTests.length === tests.length
+ if (failingNewTests.length && !areAllNewTestsFailing) {
+ this.stats.failures -= failingNewTests.length
+ this.failures -= failingNewTests.length
+ }
+ }
+ }
+
+ if (status === 'fail') {
+ error = new Error(`Failed tests: ${this.failures}.`)
+ }
+
+ testFileToSuiteAr.clear()
+
+ let testCodeCoverageLinesTotal
+ if (global.__coverage__) {
+ try {
+ testCodeCoverageLinesTotal = originalCoverageMap.getCoverageSummary().lines.pct
+ } catch (e) {
+ // ignore errors
+ }
+ // restore the original coverage
+ global.__coverage__ = fromCoverageMapToCoverage(originalCoverageMap)
+ }
+
+ testSessionFinishCh.publish({
+ status,
+ isSuitesSkipped,
+ testCodeCoverageLinesTotal,
+ numSkippedSuites: skippedSuites.length,
+ hasForcedToRunSuites: isForcedToRun,
+ hasUnskippableSuites: !!unskippableSuites.length,
+ error,
+ isEarlyFlakeDetectionEnabled,
+ isParallel
+ })
+ })
+}
+
+// In this hook we delay the execution with options.delay to grab the library configuration,
+// skippable suites and known tests.
+// It is called but skipped in parallel mode.
+addHook({
+ name: 'mocha',
+ versions: ['>=5.2.0'],
+ file: 'lib/mocha.js'
+}, (Mocha) => {
+ const mochaRunAsyncResource = new AsyncResource('bound-anonymous-fn')
+ shimmer.wrap(Mocha.prototype, 'run', run => function () {
+ // Workers do not need to request any data, just run the tests
+ if (!testStartCh.hasSubscribers || process.env.MOCHA_WORKER_ID || this.options.parallel) {
+ return run.apply(this, arguments)
+ }
+
+ // `options.delay` does not work in parallel mode, so ITR and EFD can't work.
+ // TODO: use `lib/cli/run-helpers.js#runMocha` to get the data in parallel mode.
+ this.options.delay = true
+
+ const runner = run.apply(this, arguments)
+
+ this.files.forEach(path => {
+ const isUnskippable = isMarkedAsUnskippable({ path })
+ if (isUnskippable) {
+ unskippableSuites.push(path)
+ }
+ })
+
+ const onReceivedSkippableSuites = ({ err, skippableSuites, itrCorrelationId: responseItrCorrelationId }) => {
+ if (err) {
+ suitesToSkip = []
+ } else {
+ suitesToSkip = skippableSuites
+ itrCorrelationId = responseItrCorrelationId
+ }
+ // We remove the suites that we skip through ITR
+ const filteredSuites = getFilteredSuites(runner.suite.suites)
+ const { suitesToRun } = filteredSuites
+
+ isSuitesSkipped = suitesToRun.length !== runner.suite.suites.length
+
+ log.debug(
+ () => `${suitesToRun.length} out of ${runner.suite.suites.length} suites are going to run.`
+ )
+
+ runner.suite.suites = suitesToRun
+
+ skippedSuites = Array.from(filteredSuites.skippedSuites)
+
+ global.run()
+ }
+
+ const onReceivedKnownTests = ({ err, knownTests: receivedKnownTests }) => {
+ if (err) {
+ knownTests = []
+ isEarlyFlakeDetectionEnabled = false
+ } else {
+ knownTests = receivedKnownTests
+ }
+
+ if (isSuitesSkippingEnabled) {
+ skippableSuitesCh.publish({
+ onDone: mochaRunAsyncResource.bind(onReceivedSkippableSuites)
+ })
+ } else {
+ global.run()
+ }
+ }
+
+ const onReceivedConfiguration = ({ err, libraryConfig }) => {
+ if (err || !skippableSuitesCh.hasSubscribers || !knownTestsCh.hasSubscribers) {
+ return global.run()
+ }
+
+ isEarlyFlakeDetectionEnabled = libraryConfig.isEarlyFlakeDetectionEnabled
+ isSuitesSkippingEnabled = libraryConfig.isSuitesSkippingEnabled
+ earlyFlakeDetectionNumRetries = libraryConfig.earlyFlakeDetectionNumRetries
+
+ if (isEarlyFlakeDetectionEnabled) {
+ knownTestsCh.publish({
+ onDone: mochaRunAsyncResource.bind(onReceivedKnownTests)
+ })
+ } else if (isSuitesSkippingEnabled) {
+ skippableSuitesCh.publish({
+ onDone: mochaRunAsyncResource.bind(onReceivedSkippableSuites)
+ })
+ } else {
+ global.run()
+ }
+ }
+
+ mochaRunAsyncResource.runInAsyncScope(() => {
+ libraryConfigurationCh.publish({
+ onDone: mochaRunAsyncResource.bind(onReceivedConfiguration)
+ })
+ })
+
+ return runner
+ })
+ return Mocha
+})
+
+// Only used to set `mocha.options.delay` to true in serial mode. When the mocha CLI is used,
+// setting options.delay in Mocha#run is not enough to delay the execution.
+// TODO: modify this hook to grab the data in parallel mode, so that ITR and EFD can work.
+addHook({
+ name: 'mocha',
+ versions: ['>=5.2.0'],
+ file: 'lib/cli/run-helpers.js'
+}, (run) => {
+ shimmer.wrap(run, 'runMocha', runMocha => async function () {
+ if (!testStartCh.hasSubscribers) {
+ return runMocha.apply(this, arguments)
+ }
+
+ const mocha = arguments[0]
+ /**
+ * This attaches `run` to the global context, which we'll call after
+ * our configuration and skippable suites requests
+ */
+ if (!mocha.options.parallel) {
+ mocha.options.delay = true
+ }
+ return runMocha.apply(this, arguments)
+ })
+ return run
+})
+
+// Only used in serial mode (no --parallel flag is passed)
+// This hook is used to generate session, module, suite and test events
+addHook({
+ name: 'mocha',
+ versions: ['>=5.2.0'],
+ file: 'lib/runner.js'
+}, function (Runner, frameworkVersion) {
+ if (patched.has(Runner)) return Runner
+
+ patched.add(Runner)
+
+ shimmer.wrap(Runner.prototype, 'runTests', runTests => function (suite, fn) {
+ if (isEarlyFlakeDetectionEnabled) {
+ // by the time we reach `this.on('test')`, it is too late. We need to add retries here
+ suite.tests.forEach(test => {
+ if (!test.isPending() && isNewTest(test, knownTests)) {
+ test._ddIsNew = true
+ retryTest(test, earlyFlakeDetectionNumRetries)
+ }
+ })
+ }
+ return runTests.apply(this, arguments)
+ })
+
+ shimmer.wrap(Runner.prototype, 'run', run => function () {
+ if (!testStartCh.hasSubscribers) {
+ return run.apply(this, arguments)
+ }
+
+ const { suitesByTestFile, numSuitesByTestFile } = getSuitesByTestFile(this.suite)
+
+ this.once('start', getOnStartHandler(false, frameworkVersion))
+
+ this.once('end', getOnEndHandler(false))
+
+ this.on('test', getOnTestHandler(true, newTests))
+
+ this.on('test end', getOnTestEndHandler())
+
+ // If the hook passes, 'hook end' will be emitted. Otherwise, 'fail' will be emitted
+ this.on('hook end', getOnHookEndHandler())
+
+ this.on('fail', getOnFailHandler(true))
+
+ this.on('pending', getOnPendingHandler())
+
+ this.on('suite', function (suite) {
+ if (suite.root || !suite.tests.length) {
+ return
+ }
+ let asyncResource = testFileToSuiteAr.get(suite.file)
+ if (!asyncResource) {
+ asyncResource = new AsyncResource('bound-anonymous-fn')
+ testFileToSuiteAr.set(suite.file, asyncResource)
+ const isUnskippable = unskippableSuites.includes(suite.file)
+ isForcedToRun = isUnskippable && suitesToSkip.includes(getTestSuitePath(suite.file, process.cwd()))
+ asyncResource.runInAsyncScope(() => {
+ testSuiteStartCh.publish({
+ testSuiteAbsolutePath: suite.file,
+ isUnskippable,
+ isForcedToRun,
+ itrCorrelationId
+ })
+ })
+ }
+ })
+
+ this.on('suite end', function (suite) {
+ if (suite.root) {
+ return
+ }
+ const suitesInTestFile = suitesByTestFile[suite.file]
+
+ const isLastSuite = --numSuitesByTestFile[suite.file] === 0
+ if (!isLastSuite) {
+ return
+ }
+
+ let status = 'pass'
+ if (suitesInTestFile.every(suite => suite.pending)) {
+ status = 'skip'
+ } else {
+ // has to check every test in the test file
+ suitesInTestFile.forEach(suite => {
+ suite.eachTest(test => {
+ if (test.state === 'failed' || test.timedOut) {
+ status = 'fail'
+ }
+ })
+ })
+ }
+
+ if (global.__coverage__) {
+ const coverageFiles = getCoveredFilenamesFromCoverage(global.__coverage__)
+
+ testSuiteCodeCoverageCh.publish({
+ coverageFiles,
+ suiteFile: suite.file
+ })
+ // We need to reset coverage to get a code coverage per suite
+ // Before that, we preserve the original coverage
+ mergeCoverage(global.__coverage__, originalCoverageMap)
+ resetCoverage(global.__coverage__)
+ }
+
+ const asyncResource = testFileToSuiteAr.get(suite.file)
+ asyncResource.runInAsyncScope(() => {
+ testSuiteFinishCh.publish(status)
+ })
+ })
+
+ return run.apply(this, arguments)
+ })
+
+ return Runner
+})
+
+// Used both in serial and parallel mode, and by both the main process and the workers
+// Used to bind the correct async resource to the test.
+addHook({
+ name: 'mocha',
+ versions: ['>=5.2.0'],
+ file: 'lib/runnable.js'
+}, runnableWrapper)
+
+// Only used in parallel mode (--parallel flag is passed)
+// Used to generate suite events and receive test payloads from workers
+addHook({
+ name: 'workerpool',
+ // mocha@8.0.0 added parallel support and uses workerpool for it
+ // The version they use is 6.0.0:
+ // https://github.com/mochajs/mocha/blob/612fa31228c695f16173ac675f40ccdf26b4cfb5/package.json#L75
+ versions: ['>=6.0.0'],
+ file: 'src/WorkerHandler.js'
+}, (workerHandlerPackage) => {
+ shimmer.wrap(workerHandlerPackage.prototype, 'exec', exec => function (message, [testSuiteAbsolutePath]) {
+ if (!testStartCh.hasSubscribers) {
+ return exec.apply(this, arguments)
+ }
+
+ this.worker.on('message', function (message) {
+ if (Array.isArray(message)) {
+ const [messageCode, payload] = message
+ if (messageCode === MOCHA_WORKER_TRACE_PAYLOAD_CODE) {
+ testSessionAsyncResource.runInAsyncScope(() => {
+ workerReportTraceCh.publish(payload)
+ })
+ }
+ }
+ })
+
+ const testSuiteAsyncResource = new AsyncResource('bound-anonymous-fn')
+ testSuiteAsyncResource.runInAsyncScope(() => {
+ testSuiteStartCh.publish({
+ testSuiteAbsolutePath
+ })
+ })
+
+ try {
+ const promise = exec.apply(this, arguments)
+ promise.then(
+ (result) => {
+ const status = result.failureCount === 0 ? 'pass' : 'fail'
+ testSuiteAsyncResource.runInAsyncScope(() => {
+ testSuiteFinishCh.publish(status)
+ })
+ },
+ (err) => {
+ testSuiteAsyncResource.runInAsyncScope(() => {
+ testSuiteErrorCh.publish(err)
+ testSuiteFinishCh.publish('fail')
+ })
+ }
+ )
+ return promise
+ } catch (err) {
+ testSuiteAsyncResource.runInAsyncScope(() => {
+ testSuiteErrorCh.publish(err)
+ testSuiteFinishCh.publish('fail')
+ })
+ throw err
+ }
+ })
+
+ return workerHandlerPackage
+})
+
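The worker half of this channel lives in mocha/worker.js; the contract the handler above filters on is just a tagged tuple. A toy model, with a made-up numeric tag standing in for MOCHA_WORKER_TRACE_PAYLOAD_CODE:

const MOCHA_WORKER_TRACE_PAYLOAD_CODE = 80 // hypothetical value, for illustration only
const message = [MOCHA_WORKER_TRACE_PAYLOAD_CODE, { spans: [] }] // hypothetical trace payload

if (Array.isArray(message)) {
  const [code, payload] = message
  if (code === MOCHA_WORKER_TRACE_PAYLOAD_CODE) {
    console.log('main process would publish on ci:mocha:worker-report:trace', payload)
  }
}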
+// Only used in parallel mode (--parallel flag is passed)
+// Used to start and finish test session and test module
+addHook({
+ name: 'mocha',
+ versions: ['>=5.2.0'],
+ file: 'lib/nodejs/parallel-buffered-runner.js'
+}, (ParallelBufferedRunner, frameworkVersion) => {
+ shimmer.wrap(ParallelBufferedRunner.prototype, 'run', run => function () {
+ if (!testStartCh.hasSubscribers) {
+ return run.apply(this, arguments)
+ }
+
+ this.once('start', getOnStartHandler(true, frameworkVersion))
+ this.once('end', getOnEndHandler(true))
+
+ return run.apply(this, arguments)
+ })
+
+ return ParallelBufferedRunner
+})
diff --git a/packages/datadog-instrumentations/src/mocha/utils.js b/packages/datadog-instrumentations/src/mocha/utils.js
new file mode 100644
index 00000000000..254f3be5860
--- /dev/null
+++ b/packages/datadog-instrumentations/src/mocha/utils.js
@@ -0,0 +1,306 @@
+'use strict'
+
+const {
+ getTestSuitePath,
+ removeEfdStringFromTestName,
+ addEfdStringToTestName
+} = require('../../../dd-trace/src/plugins/util/test')
+const { channel, AsyncResource } = require('../helpers/instrument')
+const shimmer = require('../../../datadog-shimmer')
+
+// test channels
+const testStartCh = channel('ci:mocha:test:start')
+const testFinishCh = channel('ci:mocha:test:finish')
+const errorCh = channel('ci:mocha:test:error')
+const skipCh = channel('ci:mocha:test:skip')
+
+// suite channels
+const testSuiteErrorCh = channel('ci:mocha:test-suite:error')
+
+const testToAr = new WeakMap()
+const originalFns = new WeakMap()
+const testToStartLine = new WeakMap()
+const testFileToSuiteAr = new Map()
+const wrappedFunctions = new WeakSet()
+
+function isNewTest (test, knownTests) {
+ const testSuite = getTestSuitePath(test.file, process.cwd())
+ const testName = removeEfdStringFromTestName(test.fullTitle())
+ const testsForSuite = knownTests.mocha?.[testSuite] || []
+ return !testsForSuite.includes(testName)
+}
+
+function retryTest (test, earlyFlakeDetectionNumRetries) {
+ const originalTestName = test.title
+ const suite = test.parent
+ for (let retryIndex = 0; retryIndex < earlyFlakeDetectionNumRetries; retryIndex++) {
+ const clonedTest = test.clone()
+ clonedTest.title = addEfdStringToTestName(originalTestName, retryIndex + 1)
+ suite.addTest(clonedTest)
+ clonedTest._ddIsNew = true
+ clonedTest._ddIsEfdRetry = true
+ }
+}
+
+function getSuitesByTestFile (root) {
+ const suitesByTestFile = {}
+ function getSuites (suite) {
+ if (suite.file) {
+ if (suitesByTestFile[suite.file]) {
+ suitesByTestFile[suite.file].push(suite)
+ } else {
+ suitesByTestFile[suite.file] = [suite]
+ }
+ }
+ suite.suites.forEach(suite => {
+ getSuites(suite)
+ })
+ }
+ getSuites(root)
+
+ const numSuitesByTestFile = Object.keys(suitesByTestFile).reduce((acc, testFile) => {
+ acc[testFile] = suitesByTestFile[testFile].length
+ return acc
+ }, {})
+
+ return { suitesByTestFile, numSuitesByTestFile }
+}
+
+function isMochaRetry (test) {
+ return test._currentRetry !== undefined && test._currentRetry !== 0
+}
+
+function getTestFullName (test) {
+ return `mocha.${getTestSuitePath(test.file, process.cwd())}.${removeEfdStringFromTestName(test.fullTitle())}`
+}
+
+function getTestStatus (test) {
+ if (test.isPending()) {
+ return 'skip'
+ }
+ if (test.isFailed() || test.timedOut) {
+ return 'fail'
+ }
+ return 'pass'
+}
+
+function getTestAsyncResource (test) {
+ if (!test.fn) {
+ return testToAr.get(test)
+ }
+ if (!wrappedFunctions.has(test.fn)) {
+ return testToAr.get(test.fn)
+ }
+ const originalFn = originalFns.get(test.fn)
+ return testToAr.get(originalFn)
+}
+
+function runnableWrapper (RunnablePackage) {
+ shimmer.wrap(RunnablePackage.prototype, 'run', run => function () {
+ if (!testStartCh.hasSubscribers) {
+ return run.apply(this, arguments)
+ }
+ const isBeforeEach = this.parent._beforeEach.includes(this)
+ const isAfterEach = this.parent._afterEach.includes(this)
+
+ const isTestHook = isBeforeEach || isAfterEach
+
+ // we restore the original user-defined function
+ if (wrappedFunctions.has(this.fn)) {
+ const originalFn = originalFns.get(this.fn)
+ // delete the wrapper from the set before reassigning `this.fn`, otherwise we would
+ // try to delete the original function, which was never added
+ wrappedFunctions.delete(this.fn)
+ this.fn = originalFn
+ }
+
+ if (isTestHook || this.type === 'test') {
+ const test = isTestHook ? this.ctx.currentTest : this
+ const asyncResource = getTestAsyncResource(test)
+
+ if (asyncResource) {
+ // we bind the test fn to the correct async resource
+ const newFn = asyncResource.bind(this.fn)
+
+ // we store the original function so we don't lose it
+ originalFns.set(newFn, this.fn)
+ this.fn = newFn
+
+ wrappedFunctions.add(this.fn)
+ }
+ }
+
+ return run.apply(this, arguments)
+ })
+ return RunnablePackage
+}
+
+function getOnTestHandler (isMain, newTests) {
+ return function (test) {
+ if (isMochaRetry(test)) {
+ return
+ }
+ const testStartLine = testToStartLine.get(test)
+ const asyncResource = new AsyncResource('bound-anonymous-fn')
+ testToAr.set(test.fn, asyncResource)
+
+ const {
+ file: testSuiteAbsolutePath,
+ title,
+ _ddIsNew: isNew,
+ _ddIsEfdRetry: isEfdRetry
+ } = test
+
+ const testInfo = {
+ testName: test.fullTitle(),
+ testSuiteAbsolutePath,
+ title,
+ testStartLine
+ }
+
+ if (isMain) {
+ testInfo.isNew = isNew
+ testInfo.isEfdRetry = isEfdRetry
+ // We want to store the results of the new tests
+ if (isNew) {
+ const testFullName = getTestFullName(test)
+ if (newTests[testFullName]) {
+ newTests[testFullName].push(test)
+ } else {
+ newTests[testFullName] = [test]
+ }
+ }
+ } else {
+ testInfo.isParallel = true
+ }
+
+ asyncResource.runInAsyncScope(() => {
+ testStartCh.publish(testInfo)
+ })
+ }
+}
+
+function getOnTestEndHandler () {
+ return function (test) {
+ const asyncResource = getTestAsyncResource(test)
+ const status = getTestStatus(test)
+
+ // if there are afterEach hooks to run, we don't finish the test yet
+ if (asyncResource && !test.parent._afterEach.length) {
+ asyncResource.runInAsyncScope(() => {
+ testFinishCh.publish(status)
+ })
+ }
+ }
+}
+
+function getOnHookEndHandler () {
+ return function (hook) {
+ const test = hook.ctx.currentTest
+ if (test && hook.parent._afterEach.includes(hook)) { // only if it's an afterEach
+ const isLastAfterEach = hook.parent._afterEach.indexOf(hook) === hook.parent._afterEach.length - 1
+ if (isLastAfterEach) {
+ const status = getTestStatus(test)
+ const asyncResource = getTestAsyncResource(test)
+ asyncResource.runInAsyncScope(() => {
+ testFinishCh.publish(status)
+ })
+ }
+ }
+ }
+}
+
+function getOnFailHandler (isMain) {
+ return function (testOrHook, err) {
+ const testFile = testOrHook.file
+ let test = testOrHook
+ const isHook = testOrHook.type === 'hook'
+ if (isHook && testOrHook.ctx) {
+ test = testOrHook.ctx.currentTest
+ }
+ let testAsyncResource
+ if (test) {
+ testAsyncResource = getTestAsyncResource(test)
+ }
+ if (testAsyncResource) {
+ testAsyncResource.runInAsyncScope(() => {
+ if (isHook) {
+ err.message = `${testOrHook.fullTitle()}: ${err.message}`
+ errorCh.publish(err)
+ // if it's a hook and it has failed, 'test end' will not be called
+ testFinishCh.publish('fail')
+ } else {
+ errorCh.publish(err)
+ }
+ })
+ }
+
+ if (isMain) {
+ const testSuiteAsyncResource = testFileToSuiteAr.get(testFile)
+
+ if (testSuiteAsyncResource) {
+ // we propagate the error to the suite
+ const testSuiteError = new Error(
+ `"${testOrHook.parent.fullTitle()}" failed with message "${err.message}"`
+ )
+ testSuiteError.stack = err.stack
+ testSuiteAsyncResource.runInAsyncScope(() => {
+ testSuiteErrorCh.publish(testSuiteError)
+ })
+ }
+ }
+ }
+}
+
+function getOnPendingHandler () {
+ return function (test) {
+ const testStartLine = testToStartLine.get(test)
+ const {
+ file: testSuiteAbsolutePath,
+ title
+ } = test
+
+ const testInfo = {
+ testName: test.fullTitle(),
+ testSuiteAbsolutePath,
+ title,
+ testStartLine
+ }
+
+ const asyncResource = getTestAsyncResource(test)
+ if (asyncResource) {
+ asyncResource.runInAsyncScope(() => {
+ skipCh.publish(testInfo)
+ })
+ } else {
+ // if there is no async resource, the test has been skipped through `test.skip`
+ // or the parent suite is skipped
+ const skippedTestAsyncResource = new AsyncResource('bound-anonymous-fn')
+ if (test.fn) {
+ testToAr.set(test.fn, skippedTestAsyncResource)
+ } else {
+ testToAr.set(test, skippedTestAsyncResource)
+ }
+ skippedTestAsyncResource.runInAsyncScope(() => {
+ skipCh.publish(testInfo)
+ })
+ }
+ }
+}
+module.exports = {
+ isNewTest,
+ retryTest,
+ getSuitesByTestFile,
+ isMochaRetry,
+ getTestFullName,
+ getTestStatus,
+ runnableWrapper,
+ testToAr,
+ originalFns,
+ getTestAsyncResource,
+ testToStartLine,
+ getOnTestHandler,
+ getOnTestEndHandler,
+ getOnHookEndHandler,
+ getOnFailHandler,
+ getOnPendingHandler,
+ testFileToSuiteAr
+}
diff --git a/packages/datadog-instrumentations/src/mocha/worker.js b/packages/datadog-instrumentations/src/mocha/worker.js
new file mode 100644
index 00000000000..c2fa26f1504
--- /dev/null
+++ b/packages/datadog-instrumentations/src/mocha/worker.js
@@ -0,0 +1,51 @@
+'use strict'
+
+const { addHook, channel } = require('../helpers/instrument')
+const shimmer = require('../../../datadog-shimmer')
+
+const {
+ runnableWrapper,
+ getOnTestHandler,
+ getOnTestEndHandler,
+ getOnHookEndHandler,
+ getOnFailHandler,
+ getOnPendingHandler
+} = require('./utils')
+require('./common')
+
+const workerFinishCh = channel('ci:mocha:worker:finish')
+
+// Runner is also hooked in mocha/main.js, but here we only generate test events.
+addHook({
+ name: 'mocha',
+ versions: ['>=5.2.0'],
+ file: 'lib/runner.js'
+}, function (Runner) {
+ shimmer.wrap(Runner.prototype, 'run', run => function () {
+ // We flush when the worker ends with its test file (a mocha instance in a worker runs a single test file)
+ this.on('end', () => {
+ workerFinishCh.publish()
+ })
+ this.on('test', getOnTestHandler(false))
+
+ this.on('test end', getOnTestEndHandler())
+
+ // If the hook passes, 'hook end' will be emitted. Otherwise, 'fail' will be emitted
+ this.on('hook end', getOnHookEndHandler())
+
+ this.on('fail', getOnFailHandler(false))
+
+ this.on('pending', getOnPendingHandler())
+
+ return run.apply(this, arguments)
+ })
+ return Runner
+})
+
+// Used in both serial and parallel mode, by the main process and the workers alike,
+// to set the correct async resource on the test.
+addHook({
+ name: 'mocha',
+ versions: ['>=5.2.0'],
+ file: 'lib/runnable.js'
+}, runnableWrapper)
diff --git a/packages/datadog-instrumentations/src/openai.js b/packages/datadog-instrumentations/src/openai.js
index 998e557a9d8..97462869041 100644
--- a/packages/datadog-instrumentations/src/openai.js
+++ b/packages/datadog-instrumentations/src/openai.js
@@ -15,13 +15,15 @@ const V4_PACKAGE_SHIMS = [
file: 'resources/chat/completions.js',
targetClass: 'Completions',
baseResource: 'chat.completions',
- methods: ['create']
+ methods: ['create'],
+ streamedResponse: true
},
{
file: 'resources/completions.js',
targetClass: 'Completions',
baseResource: 'completions',
- methods: ['create']
+ methods: ['create'],
+ streamedResponse: true
},
{
file: 'resources/embeddings.js',
@@ -121,7 +123,7 @@ addHook({ name: 'openai', file: 'dist/api.js', versions: ['>=3.0.0 <4'] }, expor
return fn.apply(this, arguments)
.then((response) => {
- finishCh.publish({
+ finish({
headers: response.headers,
body: response.data,
path: response.request.path,
@@ -130,10 +132,10 @@ addHook({ name: 'openai', file: 'dist/api.js', versions: ['>=3.0.0 <4'] }, expor
return response
})
- .catch((err) => {
- errorCh.publish({ err })
+ .catch(error => {
+ finish(undefined, error)
- throw err
+ throw error
})
})
}
@@ -141,9 +143,140 @@ addHook({ name: 'openai', file: 'dist/api.js', versions: ['>=3.0.0 <4'] }, expor
return exports
})
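+// Merges one streamed chunk into the accumulated `content`. For chat completions each
+// chunk carries a `delta` (illustrative shape): { choices: [{ index: 0, delta: { content: 'Hi' } }] };
+// deltas for the same choice index are concatenated into a single message.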
+function addStreamedChunk (content, chunk) {
+ content.usage = chunk.usage // add usage if it was specified to be returned
+ for (const choice of chunk.choices) {
+ const choiceIdx = choice.index
+ const oldChoice = content.choices.find(choice => choice?.index === choiceIdx)
+ if (!oldChoice) {
+ // we don't know which choices arrive in which order
+ content.choices[choiceIdx] = choice
+ } else {
+ if (!oldChoice.finish_reason) {
+ oldChoice.finish_reason = choice.finish_reason
+ }
+
+ // delta exists on chat completions
+ const delta = choice.delta
+
+ if (delta) {
+ const content = delta.content
+ if (content) {
+ if (oldChoice.delta.content) { // we don't want to append to undefined
+ oldChoice.delta.content += content
+ } else {
+ oldChoice.delta.content = content
+ }
+ }
+ } else {
+ const text = choice.text
+ if (text) {
+ if (oldChoice.text) {
+ oldChoice.text += text
+ } else {
+ oldChoice.text = text
+ }
+ }
+ }
+
+ // tools only exist on chat completions
+ const tools = delta && choice.delta.tool_calls
+
+ if (tools) {
+ // tool call arguments arrive incrementally; if we have already seen this tool
+ // call, append to its arguments, otherwise keep the new tool as-is
+ const oldTools = oldChoice.delta.tool_calls || []
+ oldChoice.delta.tool_calls = tools.map((newTool, toolIdx) => {
+ const oldTool = oldTools[toolIdx]
+
+ if (!oldTool) return newTool
+
+ oldTool.function.arguments += newTool.function.arguments
+ return oldTool
+ })
+ }
+ }
+ }
+}
+
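+// The concatenated SSE payload parsed here looks like (illustrative example, not a real response):
+//   data: {"id":"...","choices":[{"index":0,"delta":{"content":"Hi"}}]}
+//
+//   data: [DONE]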
+function convertBuffersToObjects (chunks = []) {
+ return Buffer
+ .concat(chunks) // combine the buffers
+ .toString() // stringify
+ .split(/(?=data:)/) // split on "data:"
+ .map(chunk => chunk.split('\n').join('')) // remove newlines
+ .map(chunk => chunk.substring(6)) // remove 'data: ' from the front
+ .slice(0, -1) // remove the last [DONE] message
+ .map(JSON.parse) // parse all of the returned objects
+}
+
+/**
+ * For streamed responses, we need to accumulate all of the content in
+ * the chunks, and let the combined content be the final response.
+ * This way, spans look the same as when not streamed.
+ */
+function wrapStreamIterator (response, options, n) {
+ let processChunksAsBuffers = false
+ let chunks = []
+ return function (itr) {
+ return function () {
+ const iterator = itr.apply(this, arguments)
+ shimmer.wrap(iterator, 'next', next => function () {
+ return next.apply(this, arguments)
+ .then(res => {
+ const { done, value: chunk } = res
+
+ if (chunk) {
+ chunks.push(chunk)
+ if (chunk instanceof Buffer) {
+ // this operation should be safe
+ // if one chunk is a buffer (versus a plain object), the rest should be as well
+ processChunksAsBuffers = true
+ }
+ }
+
+ if (done) {
+ let body = {}
+ chunks = chunks.filter(chunk => chunk != null) // filter null or undefined values
+
+ if (chunks) {
+ if (processChunksAsBuffers) {
+ chunks = convertBuffersToObjects(chunks)
+ }
+
+ if (chunks.length) {
+ // seed the body with everything outside of `choices` from the first chunk
+ // this includes important data like created, id, model, etc.
+ body = { ...chunks[0], choices: Array.from({ length: n }) }
+ // loop from the first chunk onwards, merging each chunk's choices into the body
+ for (let i = 0; i < chunks.length; i++) {
+ addStreamedChunk(body, chunks[i])
+ }
+ }
+ }
+
+ finish({
+ headers: response.headers,
+ body,
+ path: response.url,
+ method: options.method
+ })
+ }
+
+ return res
+ })
+ .catch(err => {
+ finish(undefined, err)
+
+ throw err
+ })
+ })
+ return iterator
+ }
+ }
+}
+
for (const shim of V4_PACKAGE_SHIMS) {
- const { file, targetClass, baseResource, methods } = shim
- addHook({ name: 'openai', file, versions: shim.versions || ['>=4'] }, exports => {
+ const { file, targetClass, baseResource, methods, versions, streamedResponse } = shim
+ addHook({ name: 'openai', file, versions: versions || ['>=4'] }, exports => {
const targetPrototype = exports[targetClass].prototype
for (const methodName of methods) {
@@ -152,6 +285,22 @@ for (const shim of V4_PACKAGE_SHIMS) {
return methodFn.apply(this, arguments)
}
+ // The OpenAI library lets you set `stream: true` on the options arg to any method,
+ // but we only want to handle streamed responses in specific cases:
+ // chat.completions and completions
+ const stream = streamedResponse && getOption(arguments, 'stream', false)
+
+ // we need to compute how many prompts we are sending in streamed completions cases;
+ // this is not applicable to chat completions
+ let n
+ if (stream) {
+ n = getOption(arguments, 'n', 1)
+ const prompt = getOption(arguments, 'prompt')
+ if (Array.isArray(prompt) && typeof prompt[0] !== 'number') {
+ n *= prompt.length
+ }
+ }
+
const client = this._client || this.client
startCh.publish({
@@ -170,19 +319,29 @@ for (const shim of V4_PACKAGE_SHIMS) {
// the original response is wrapped in a promise, so we need to unwrap it
.then(body => Promise.all([this.responsePromise, body]))
.then(([{ response, options }, body]) => {
- finishCh.publish({
- headers: response.headers,
- body,
- path: response.url,
- method: options.method
- })
+ if (stream) {
+ if (body.iterator) {
+ shimmer.wrap(body, 'iterator', wrapStreamIterator(response, options, n))
+ } else {
+ shimmer.wrap(
+ body.response.body, Symbol.asyncIterator, wrapStreamIterator(response, options, n)
+ )
+ }
+ } else {
+ finish({
+ headers: response.headers,
+ body,
+ path: response.url,
+ method: options.method
+ })
+ }
return body
})
- .catch(err => {
- errorCh.publish({ err })
+ .catch(error => {
+ finish(undefined, error)
- throw err
+ throw error
})
.finally(() => {
// maybe we don't want to unwrap here in case the promise is re-used?
@@ -197,3 +356,15 @@ for (const shim of V4_PACKAGE_SHIMS) {
return exports
})
}
+
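+// Single exit point for both outcomes: on error we publish to the error channel first,
+// then always publish to the finish channel (with `undefined` as the response).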
+function finish (response, error) {
+ if (error) {
+ errorCh.publish({ error })
+ }
+
+ finishCh.publish(response)
+}
+
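+// In the v4 client methods wrapped here, the options object is the last argument, e.g.
+// getOption(arguments, 'stream', false) reads `stream` from create({ model, stream: true }).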
+function getOption (args, option, defaultValue) {
+ return args[args.length - 1]?.[option] || defaultValue
+}
diff --git a/packages/datadog-instrumentations/src/playwright.js b/packages/datadog-instrumentations/src/playwright.js
index 21bbfabdc30..c77b078bdac 100644
--- a/packages/datadog-instrumentations/src/playwright.js
+++ b/packages/datadog-instrumentations/src/playwright.js
@@ -301,7 +301,7 @@ function dispatcherRunWrapperNew (run) {
if (!this._allTests) {
// Removed in https://github.com/microsoft/playwright/commit/1e52c37b254a441cccf332520f60225a5acc14c7
// Not available from >=1.44.0
- this._allTests = testGroups.map(g => g.tests).flat()
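+ // store under a dd-prefixed key to avoid writing to a property Playwright manages itself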
+ this._ddAllTests = testGroups.map(g => g.tests).flat()
}
remainingTestsByFile = getTestsBySuiteFromTestGroups(arguments[0])
return run.apply(this, arguments)
@@ -339,8 +339,9 @@ function getTestByTestId (dispatcher, testId) {
if (dispatcher._testById) {
return dispatcher._testById.get(testId)?.test
}
- if (dispatcher._allTests) {
- return dispatcher._allTests.find(({ id }) => id === testId)
+ const allTests = dispatcher._allTests || dispatcher._ddAllTests
+ if (allTests) {
+ return allTests.find(({ id }) => id === testId)
}
}
diff --git a/packages/datadog-instrumentations/src/router.js b/packages/datadog-instrumentations/src/router.js
index 9ac38caf6c6..5768390e6cf 100644
--- a/packages/datadog-instrumentations/src/router.js
+++ b/packages/datadog-instrumentations/src/router.js
@@ -1,6 +1,6 @@
'use strict'
-const METHODS = require('methods').concat('all')
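+// use Node's built-in http.METHODS (lowercased to match the router API) instead of the 'methods' package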
+const METHODS = require('http').METHODS.map(v => v.toLowerCase()).concat('all')
const pathToRegExp = require('path-to-regexp')
const shimmer = require('../../datadog-shimmer')
const { addHook, channel } = require('./helpers/instrument')
diff --git a/packages/datadog-instrumentations/src/selenium.js b/packages/datadog-instrumentations/src/selenium.js
index d018c97dfb9..141aa967e40 100644
--- a/packages/datadog-instrumentations/src/selenium.js
+++ b/packages/datadog-instrumentations/src/selenium.js
@@ -23,6 +23,9 @@ addHook({
}, (seleniumPackage, seleniumVersion) => {
// TODO: do not turn this into async. Use promises
shimmer.wrap(seleniumPackage.WebDriver.prototype, 'get', get => async function () {
+ if (!ciSeleniumDriverGetStartCh.hasSubscribers) {
+ return get.apply(this, arguments)
+ }
let traceId
const setTraceId = (inputTraceId) => {
traceId = inputTraceId
@@ -40,15 +43,20 @@ addHook({
isRumActive
})
- await this.manage().addCookie({
- name: DD_CIVISIBILITY_TEST_EXECUTION_ID_COOKIE_NAME,
- value: traceId
- })
+ if (traceId && isRumActive) {
+ await this.manage().addCookie({
+ name: DD_CIVISIBILITY_TEST_EXECUTION_ID_COOKIE_NAME,
+ value: traceId
+ })
+ }
return getResult
})
shimmer.wrap(seleniumPackage.WebDriver.prototype, 'quit', quit => async function () {
+ if (!ciSeleniumDriverGetStartCh.hasSubscribers) {
+ return quit.apply(this, arguments)
+ }
const isRumActive = await this.executeScript(RUM_STOP_SESSION_SCRIPT)
if (isRumActive) {
@@ -58,10 +66,9 @@ addHook({
resolve()
}, DD_CIVISIBILITY_RUM_FLUSH_WAIT_MILLIS)
})
+ await this.manage().deleteCookie(DD_CIVISIBILITY_TEST_EXECUTION_ID_COOKIE_NAME)
}
- await this.manage().deleteCookie(DD_CIVISIBILITY_TEST_EXECUTION_ID_COOKIE_NAME)
-
return quit.apply(this, arguments)
})
diff --git a/packages/datadog-plugin-http/test/server.spec.js b/packages/datadog-plugin-http/test/server.spec.js
index ba9873427fb..ee38d21789e 100644
--- a/packages/datadog-plugin-http/test/server.spec.js
+++ b/packages/datadog-plugin-http/test/server.spec.js
@@ -1,6 +1,5 @@
'use strict'
-const { AbortController } = require('node-abort-controller') // AbortController is not available in node <15
const getPort = require('get-port')
const agent = require('../../dd-trace/test/plugins/agent')
const axios = require('axios')
diff --git a/packages/datadog-plugin-mocha/src/index.js b/packages/datadog-plugin-mocha/src/index.js
index 597dc7c9cc2..dbe311b9bf1 100644
--- a/packages/datadog-plugin-mocha/src/index.js
+++ b/packages/datadog-plugin-mocha/src/index.js
@@ -20,7 +20,14 @@ const {
removeEfdStringFromTestName,
TEST_IS_NEW,
TEST_IS_RETRY,
- TEST_EARLY_FLAKE_ENABLED
+ TEST_EARLY_FLAKE_ENABLED,
+ TEST_SESSION_ID,
+ TEST_MODULE_ID,
+ TEST_MODULE,
+ TEST_SUITE_ID,
+ TEST_COMMAND,
+ TEST_SUITE,
+ MOCHA_IS_PARALLEL
} = require('../../dd-trace/src/plugins/util/test')
const { COMPONENT } = require('../../dd-trace/src/constants')
const {
@@ -33,6 +40,22 @@ const {
TELEMETRY_ITR_UNSKIPPABLE,
TELEMETRY_CODE_COVERAGE_NUM_FILES
} = require('../../dd-trace/src/ci-visibility/telemetry')
+const id = require('../../dd-trace/src/id')
+const log = require('../../dd-trace/src/log')
+
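+// Builds the session/module/suite correlation tags from a test suite span, so that test
+// spans reported by parallel workers can be linked back to the right suite hierarchy.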
+function getTestSuiteLevelVisibilityTags (testSuiteSpan) {
+ const testSuiteSpanContext = testSuiteSpan.context()
+ const suiteTags = {
+ [TEST_SUITE_ID]: testSuiteSpanContext.toSpanId(),
+ [TEST_SESSION_ID]: testSuiteSpanContext.toTraceId(),
+ [TEST_COMMAND]: testSuiteSpanContext._tags[TEST_COMMAND],
+ [TEST_MODULE]: 'mocha'
+ }
+ if (testSuiteSpanContext._parentId) {
+ suiteTags[TEST_MODULE_ID] = testSuiteSpanContext._parentId.toString(10)
+ }
+ return suiteTags
+}
class MochaPlugin extends CiPlugin {
static get id () {
@@ -50,7 +73,8 @@ class MochaPlugin extends CiPlugin {
if (!this.libraryConfig?.isCodeCoverageEnabled) {
return
}
- const testSuiteSpan = this._testSuites.get(suiteFile)
+ const testSuite = getTestSuitePath(suiteFile, this.sourceRoot)
+ const testSuiteSpan = this._testSuites.get(testSuite)
if (!coverageFiles.length) {
this.telemetry.count(TELEMETRY_CODE_COVERAGE_EMPTY)
@@ -73,16 +97,20 @@ class MochaPlugin extends CiPlugin {
})
this.addSub('ci:mocha:test-suite:start', ({
- testSuite,
+ testSuiteAbsolutePath,
isUnskippable,
isForcedToRun,
itrCorrelationId
}) => {
- const store = storage.getStore()
+ // If the test module span is undefined, the plugin has not been initialized correctly and we bail out
+ if (!this.testModuleSpan) {
+ return
+ }
+ const testSuite = getTestSuitePath(testSuiteAbsolutePath, this.sourceRoot)
const testSuiteMetadata = getTestSuiteCommonTags(
this.command,
this.frameworkVersion,
- getTestSuitePath(testSuite, this.sourceRoot),
+ testSuite,
'mocha'
)
if (isUnskippable) {
@@ -109,6 +137,7 @@ class MochaPlugin extends CiPlugin {
if (itrCorrelationId) {
testSuiteSpan.setTag(ITR_CORRELATION_ID, itrCorrelationId)
}
+ const store = storage.getStore()
this.enter(testSuiteSpan, store)
this._testSuites.set(testSuite, testSuiteSpan)
})
@@ -142,6 +171,10 @@ class MochaPlugin extends CiPlugin {
this.enter(span, store)
})
+ this.addSub('ci:mocha:worker:finish', () => {
+ this.tracer._exporter.flush()
+ })
+
this.addSub('ci:mocha:test:finish', (status) => {
const store = storage.getStore()
const span = store?.span
@@ -194,7 +227,8 @@ class MochaPlugin extends CiPlugin {
hasForcedToRunSuites,
hasUnskippableSuites,
error,
- isEarlyFlakeDetectionEnabled
+ isEarlyFlakeDetectionEnabled,
+ isParallel
}) => {
if (this.testSessionSpan) {
const { isSuitesSkippingEnabled, isCodeCoverageEnabled } = this.libraryConfig || {}
@@ -206,6 +240,10 @@ class MochaPlugin extends CiPlugin {
this.testModuleSpan.setTag('error', error)
}
+ if (isParallel) {
+ this.testSessionSpan.setTag(MOCHA_IS_PARALLEL, 'true')
+ }
+
addIntelligentTestRunnerSpanTags(
this.testSessionSpan,
this.testModuleSpan,
@@ -234,6 +272,37 @@ class MochaPlugin extends CiPlugin {
this.libraryConfig = null
this.tracer._exporter.flush()
})
+
+ this.addSub('ci:mocha:worker-report:trace', (traces) => {
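+ // traces arrive from the worker as a JSON string, so span/trace/parent ids must be
+ // rehydrated into id objects before the main process re-exports them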
+ const formattedTraces = JSON.parse(traces).map(trace =>
+ trace.map(span => {
+ const formattedSpan = {
+ ...span,
+ span_id: id(span.span_id),
+ trace_id: id(span.trace_id),
+ parent_id: id(span.parent_id)
+ }
+ if (formattedSpan.name === 'mocha.test') {
+ const testSuite = span.meta[TEST_SUITE]
+ const testSuiteSpan = this._testSuites.get(testSuite)
+ if (!testSuiteSpan) {
+ log.warn(`Test suite span not found for test span with test suite ${testSuite}`)
+ return formattedSpan
+ }
+ const suiteTags = getTestSuiteLevelVisibilityTags(testSuiteSpan)
+ formattedSpan.meta = {
+ ...formattedSpan.meta,
+ ...suiteTags
+ }
+ }
+ return formattedSpan
+ })
+ )
+
+ formattedTraces.forEach(trace => {
+ this.tracer._exporter.export(trace)
+ })
+ })
}
startTestSpan (testInfo) {
@@ -242,7 +311,8 @@ class MochaPlugin extends CiPlugin {
title,
isNew,
isEfdRetry,
- testStartLine
+ testStartLine,
+ isParallel
} = testInfo
const testName = removeEfdStringFromTestName(testInfo.testName)
@@ -257,8 +327,12 @@ class MochaPlugin extends CiPlugin {
extraTags[TEST_SOURCE_START] = testStartLine
}
+ if (isParallel) {
+ extraTags[MOCHA_IS_PARALLEL] = 'true'
+ }
+
const testSuite = getTestSuitePath(testSuiteAbsolutePath, this.sourceRoot)
- const testSuiteSpan = this._testSuites.get(testSuiteAbsolutePath)
+ const testSuiteSpan = this._testSuites.get(testSuite)
if (this.repositoryRoot !== this.sourceRoot && !!this.repositoryRoot) {
extraTags[TEST_SOURCE_FILE] = getTestSuitePath(testSuiteAbsolutePath, this.repositoryRoot)
diff --git a/packages/datadog-plugin-mocha/test/index.spec.js b/packages/datadog-plugin-mocha/test/index.spec.js
index e88fab23370..52110dedae8 100644
--- a/packages/datadog-plugin-mocha/test/index.spec.js
+++ b/packages/datadog-plugin-mocha/test/index.spec.js
@@ -142,6 +142,7 @@ describe('Plugin', () => {
mocha.addFile(testFilePath)
mocha.run()
})
+
it('works with failing tests', (done) => {
const testFilePath = path.join(__dirname, 'mocha-test-fail.js')
const testSuite = testFilePath.replace(`${process.cwd()}/`, '')
@@ -178,6 +179,7 @@ describe('Plugin', () => {
mocha.addFile(testFilePath)
mocha.run()
})
+
it('works with skipping tests', (done) => {
const testFilePath = path.join(__dirname, 'mocha-test-skip.js')
const testNames = [
diff --git a/packages/datadog-plugin-next/src/index.js b/packages/datadog-plugin-next/src/index.js
index 0ffad16b5b6..1dff5bec4e9 100644
--- a/packages/datadog-plugin-next/src/index.js
+++ b/packages/datadog-plugin-next/src/index.js
@@ -6,7 +6,7 @@ const analyticsSampler = require('../../dd-trace/src/analytics_sampler')
const { COMPONENT } = require('../../dd-trace/src/constants')
const web = require('../../dd-trace/src/plugins/util/web')
-const errorPages = ['/404', '/500', '/_error', '/_not-found']
+const errorPages = ['/404', '/500', '/_error', '/_not-found', '/_not-found/page']
class NextPlugin extends ServerPlugin {
static get id () {
@@ -120,7 +120,6 @@ class NextPlugin extends ServerPlugin {
'resource.name': `${req.method} ${page}`.trim(),
'next.page': page
})
-
web.setRoute(req, page)
}
diff --git a/packages/datadog-plugin-openai/src/index.js b/packages/datadog-plugin-openai/src/index.js
index 4bb31bd8c6d..aa00beb0a44 100644
--- a/packages/datadog-plugin-openai/src/index.js
+++ b/packages/datadog-plugin-openai/src/index.js
@@ -15,6 +15,14 @@ const RE_TAB = /\t/g
// TODO: In the future we should refactor config.js to make it requirable
let MAX_TEXT_LEN = 128
+let encodingForModel
+try {
+ // eslint-disable-next-line import/no-extraneous-dependencies
+ encodingForModel = require('tiktoken').encoding_for_model
+} catch {
+ // we will use token count estimations in this case
+}
+
class OpenApiPlugin extends TracingPlugin {
static get id () { return 'openai' }
static get operation () { return 'request' }
@@ -112,6 +120,10 @@ class OpenApiPlugin extends TracingPlugin {
}
}
+ if (payload.stream) {
+ tags['openai.request.stream'] = payload.stream
+ }
+
switch (methodName) {
case 'createFineTune':
case 'fine_tuning.jobs.create':
@@ -175,12 +187,21 @@ class OpenApiPlugin extends TracingPlugin {
span.addTags(tags)
}
- finish ({ headers, body, method, path }) {
- if (headers.constructor.name === 'Headers') {
- headers = Object.fromEntries(headers)
+ finish (response) {
+ const span = this.activeSpan
+ const error = !!span.context()._tags.error
+
+ let headers, body, method, path
+ if (!error) {
+ headers = response.headers
+ body = response.body
+ method = response.method
+ path = response.path
}
- const span = this.activeSpan
+ if (!error && headers?.constructor.name === 'Headers') {
+ headers = Object.fromEntries(headers)
+ }
const methodName = span._spanContext._tags['resource.name']
body = coerceResponseBody(body, methodName)
@@ -188,88 +209,98 @@ class OpenApiPlugin extends TracingPlugin {
const fullStore = storage.getStore()
const store = fullStore.openai
- if (path.startsWith('https://') || path.startsWith('http://')) {
+ if (!error && (path.startsWith('https://') || path.startsWith('http://'))) {
// basic checking for if the path was set as a full URL
// not using a full regex as it will likely be "https://api.openai.com/..."
path = new URL(path).pathname
}
const endpoint = lookupOperationEndpoint(methodName, path)
- const tags = {
- 'openai.request.endpoint': endpoint,
- 'openai.request.method': method.toUpperCase(),
+ const tags = error
+ ? {}
+ : {
+ 'openai.request.endpoint': endpoint,
+ 'openai.request.method': method.toUpperCase(),
- 'openai.organization.id': body.organization_id, // only available in fine-tunes endpoints
- 'openai.organization.name': headers['openai-organization'],
+ 'openai.organization.id': body.organization_id, // only available in fine-tunes endpoints
+ 'openai.organization.name': headers['openai-organization'],
- 'openai.response.model': headers['openai-model'] || body.model, // specific model, often undefined
- 'openai.response.id': body.id, // common creation value, numeric epoch
- 'openai.response.deleted': body.deleted, // common boolean field in delete responses
+ 'openai.response.model': headers['openai-model'] || body.model, // specific model, often undefined
+ 'openai.response.id': body.id, // common creation value, numeric epoch
+ 'openai.response.deleted': body.deleted, // common boolean field in delete responses
- // The OpenAI API appears to use both created and created_at in different places
- // Here we're conciously choosing to surface this inconsistency instead of normalizing
- 'openai.response.created': body.created,
- 'openai.response.created_at': body.created_at
- }
+ // The OpenAI API appears to use both created and created_at in different places
+ // Here we're consciously choosing to surface this inconsistency instead of normalizing
+ 'openai.response.created': body.created,
+ 'openai.response.created_at': body.created_at
+ }
responseDataExtractionByMethod(methodName, tags, body, store)
span.addTags(tags)
super.finish()
- this.sendLog(methodName, span, tags, store, false)
- this.sendMetrics(headers, body, endpoint, span._duration)
+ this.sendLog(methodName, span, tags, store, error)
+ this.sendMetrics(headers, body, endpoint, span._duration, error, tags)
}
- error (...args) {
- super.error(...args)
-
- const span = this.activeSpan
- const methodName = span._spanContext._tags['resource.name']
-
- const fullStore = storage.getStore()
- const store = fullStore.openai
-
- // We don't know most information about the request when it fails
+ sendMetrics (headers, body, endpoint, duration, error, spanTags) {
+ const tags = [`error:${Number(!!error)}`]
+ if (error) {
+ this.metrics.increment('openai.request.error', 1, tags)
+ } else {
+ tags.push(`org:${headers['openai-organization']}`)
+ tags.push(`endpoint:${endpoint}`) // just "/v1/models", no method
+ tags.push(`model:${headers['openai-model'] || body.model}`)
+ }
- const tags = ['error:1']
- this.metrics.distribution('openai.request.duration', span._duration * 1000, tags)
- this.metrics.increment('openai.request.error', 1, tags)
+ this.metrics.distribution('openai.request.duration', duration * 1000, tags)
- this.sendLog(methodName, span, {}, store, true)
- }
+ const promptTokens = spanTags['openai.response.usage.prompt_tokens']
+ const promptTokensEstimated = spanTags['openai.response.usage.prompt_tokens_estimated']
- sendMetrics (headers, body, endpoint, duration) {
- const tags = [
- `org:${headers['openai-organization']}`,
- `endpoint:${endpoint}`, // just "/v1/models", no method
- `model:${headers['openai-model']}`,
- 'error:0'
- ]
+ const completionTokens = spanTags['openai.response.usage.completion_tokens']
+ const completionTokensEstimated = spanTags['openai.response.usage.completion_tokens_estimated']
- this.metrics.distribution('openai.request.duration', duration * 1000, tags)
+ if (!error) {
+ if (promptTokensEstimated) {
+ this.metrics.distribution(
+ 'openai.tokens.prompt', promptTokens, [...tags, 'openai.estimated:true'])
+ } else {
+ this.metrics.distribution('openai.tokens.prompt', promptTokens, tags)
+ }
+ if (completionTokensEstimated) {
+ this.metrics.distribution(
+ 'openai.tokens.completion', completionTokens, [...tags, 'openai.estimated:true'])
+ } else {
+ this.metrics.distribution('openai.tokens.completion', completionTokens, tags)
+ }
- if (body && body.usage) {
- const promptTokens = body.usage.prompt_tokens
- const completionTokens = body.usage.completion_tokens
- this.metrics.distribution('openai.tokens.prompt', promptTokens, tags)
- this.metrics.distribution('openai.tokens.completion', completionTokens, tags)
- this.metrics.distribution('openai.tokens.total', promptTokens + completionTokens, tags)
+ if (promptTokensEstimated || completionTokensEstimated) {
+ this.metrics.distribution(
+ 'openai.tokens.total', promptTokens + completionTokens, [...tags, 'openai.estimated:true'])
+ } else {
+ this.metrics.distribution('openai.tokens.total', promptTokens + completionTokens, tags)
+ }
}
- if (headers['x-ratelimit-limit-requests']) {
- this.metrics.gauge('openai.ratelimit.requests', Number(headers['x-ratelimit-limit-requests']), tags)
- }
+ if (headers) {
+ if (headers['x-ratelimit-limit-requests']) {
+ this.metrics.gauge('openai.ratelimit.requests', Number(headers['x-ratelimit-limit-requests']), tags)
+ }
- if (headers['x-ratelimit-remaining-requests']) {
- this.metrics.gauge('openai.ratelimit.remaining.requests', Number(headers['x-ratelimit-remaining-requests']), tags)
- }
+ if (headers['x-ratelimit-remaining-requests']) {
+ this.metrics.gauge(
+ 'openai.ratelimit.remaining.requests', Number(headers['x-ratelimit-remaining-requests']), tags
+ )
+ }
- if (headers['x-ratelimit-limit-tokens']) {
- this.metrics.gauge('openai.ratelimit.tokens', Number(headers['x-ratelimit-limit-tokens']), tags)
- }
+ if (headers['x-ratelimit-limit-tokens']) {
+ this.metrics.gauge('openai.ratelimit.tokens', Number(headers['x-ratelimit-limit-tokens']), tags)
+ }
- if (headers['x-ratelimit-remaining-tokens']) {
- this.metrics.gauge('openai.ratelimit.remaining.tokens', Number(headers['x-ratelimit-remaining-tokens']), tags)
+ if (headers['x-ratelimit-remaining-tokens']) {
+ this.metrics.gauge('openai.ratelimit.remaining.tokens', Number(headers['x-ratelimit-remaining-tokens']), tags)
+ }
}
}
@@ -287,6 +318,89 @@ class OpenApiPlugin extends TracingPlugin {
}
}
+function countPromptTokens (methodName, payload, model) {
+ let promptTokens = 0
+ let promptEstimated = false
+ if (methodName === 'chat.completions.create') {
+ const messages = payload.messages
+ for (const message of messages) {
+ const content = message.content
+ const { tokens, estimated } = countTokens(content, model)
+ promptTokens += tokens
+ promptEstimated = estimated
+ }
+ } else if (methodName === 'completions.create') {
+ let prompt = payload.prompt
+ if (!Array.isArray(prompt)) prompt = [prompt]
+
+ for (const p of prompt) {
+ const { tokens, estimated } = countTokens(p, model)
+ promptTokens += tokens
+ promptEstimated = estimated
+ }
+ }
+
+ return { promptTokens, promptEstimated }
+}
+
+function countCompletionTokens (body, model) {
+ let completionTokens = 0
+ let completionEstimated = false
+ if (body?.choices) {
+ for (const choice of body.choices) {
+ const message = choice.message || choice.delta // delta for streamed responses
+ const text = choice.text
+ const content = text || message?.content
+
+ const { tokens, estimated } = countTokens(content, model)
+ completionTokens += tokens
+ completionEstimated = estimated
+ }
+ }
+
+ return { completionTokens, completionEstimated }
+}
+
+function countTokens (content, model) {
+ if (encodingForModel) {
+ try {
+ // try using tiktoken if it's available
+ const encoder = encodingForModel(model)
+ const tokens = encoder.encode(content).length
+ encoder.free()
+ return { tokens, estimated: false }
+ } catch {
+ // possible errors from tiktoken:
+ // * model not available for token counts
+ // * issue encoding content
+ }
+ }
+
+ return {
+ tokens: estimateTokens(content),
+ estimated: true
+ }
+}
+
+// If the model is unavailable or tiktoken is not installed, provide a very rough estimate of the number of tokens.
+// Approximate using the following assumptions:
+// * English text
+// * 1 token ~= 4 chars
+// * 1 token ~= ¾ words
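+// e.g. 'Hello, world!' -> 13 chars / 4 = 3.25 and 4 word/punctuation matches * 0.75 = 3,
+// so Math.round((1.5 * 3.25 + 0.5 * 3) / 2) = 3 estimated tokens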
+function estimateTokens (content) {
+ let estimatedTokens = 0
+ if (typeof content === 'string') {
+ const estimation1 = content.length / 4
+
+ const matches = content.match(/[\w']+|[.,!?;~@#$%^&*()+/-]/g)
+ const estimation2 = matches ? matches.length * 0.75 : 0 // 0 when there are no matches (e.g. an empty string)
+ estimatedTokens = Math.round((1.5 * estimation1 + 0.5 * estimation2) / 2)
+ } else if (Array.isArray(content) && typeof content[0] === 'number') {
+ estimatedTokens = content.length
+ }
+ return estimatedTokens
+}
+
function createEditRequestExtraction (tags, payload, store) {
const instruction = payload.instruction
tags['openai.request.instruction'] = instruction
@@ -298,7 +412,8 @@ function retrieveModelRequestExtraction (tags, payload) {
}
function createChatCompletionRequestExtraction (tags, payload, store) {
- if (!defensiveArrayLength(payload.messages)) return
+ const messages = payload.messages
+ if (!defensiveArrayLength(messages)) return
store.messages = payload.messages
for (let i = 0; i < payload.messages.length; i++) {
@@ -344,7 +459,7 @@ function responseDataExtractionByMethod (methodName, tags, body, store) {
case 'chat.completions.create':
case 'createEdit':
case 'edits.create':
- commonCreateResponseExtraction(tags, body, store)
+ commonCreateResponseExtraction(tags, body, store, methodName)
break
case 'listFiles':
@@ -580,8 +695,8 @@ function createModerationResponseExtraction (tags, body) {
}
// createCompletion, createChatCompletion, createEdit
-function commonCreateResponseExtraction (tags, body, store) {
- usageExtraction(tags, body)
+function commonCreateResponseExtraction (tags, body, store, methodName) {
+ usageExtraction(tags, body, methodName)
if (!body.choices) return
@@ -600,18 +715,20 @@ function commonCreateResponseExtraction (tags, body, store) {
tags[`openai.response.choices.${choiceIdx}.text`] = truncateText(choice.text)
// createChatCompletion only
- if (choice.message) {
- const message = choice.message
+ const message = choice.message || choice.delta // delta for streamed responses
+ if (message) {
tags[`openai.response.choices.${choiceIdx}.message.role`] = message.role
tags[`openai.response.choices.${choiceIdx}.message.content`] = truncateText(message.content)
tags[`openai.response.choices.${choiceIdx}.message.name`] = truncateText(message.name)
if (message.tool_calls) {
const toolCalls = message.tool_calls
for (let toolIdx = 0; toolIdx < toolCalls.length; toolIdx++) {
- tags[`openai.response.choices.${choiceIdx}.message.tool_calls.${toolIdx}.name`] =
+ tags[`openai.response.choices.${choiceIdx}.message.tool_calls.${toolIdx}.function.name`] =
toolCalls[toolIdx].function.name
- tags[`openai.response.choices.${choiceIdx}.message.tool_calls.${toolIdx}.arguments`] =
+ tags[`openai.response.choices.${choiceIdx}.message.tool_calls.${toolIdx}.function.arguments`] =
toolCalls[toolIdx].function.arguments
+ tags[`openai.response.choices.${choiceIdx}.message.tool_calls.${toolIdx}.id`] =
+ toolCalls[toolIdx].id
}
}
}
@@ -619,11 +736,40 @@ function commonCreateResponseExtraction (tags, body, store) {
}
// createCompletion, createChatCompletion, createEdit, createEmbedding
-function usageExtraction (tags, body) {
- if (typeof body.usage !== 'object' || !body.usage) return
- tags['openai.response.usage.prompt_tokens'] = body.usage.prompt_tokens
- tags['openai.response.usage.completion_tokens'] = body.usage.completion_tokens
- tags['openai.response.usage.total_tokens'] = body.usage.total_tokens
+function usageExtraction (tags, body, methodName) {
+ let promptTokens = 0
+ let completionTokens = 0
+ let totalTokens = 0
+ if (body && body.usage) {
+ promptTokens = body.usage.prompt_tokens
+ completionTokens = body.usage.completion_tokens
+ totalTokens = body.usage.total_tokens
+ } else if (['chat.completions.create', 'completions.create'].includes(methodName)) {
+ // estimate tokens based on method name for completions and chat completions
+ const { model } = body
+ let promptEstimated = false
+ let completionEstimated = false
+
+ // prompt tokens
+ const payload = storage.getStore().openai
+ const promptTokensCount = countPromptTokens(methodName, payload, model)
+ promptTokens = promptTokensCount.promptTokens
+ promptEstimated = promptTokensCount.promptEstimated
+
+ // completion tokens
+ const completionTokensCount = countCompletionTokens(body, model)
+ completionTokens = completionTokensCount.completionTokens
+ completionEstimated = completionTokensCount.completionEstimated
+
+ // total tokens
+ totalTokens = promptTokens + completionTokens
+ if (promptEstimated) tags['openai.response.usage.prompt_tokens_estimated'] = true
+ if (completionEstimated) tags['openai.response.usage.completion_tokens_estimated'] = true
+ }
+
+ if (promptTokens) tags['openai.response.usage.prompt_tokens'] = promptTokens
+ if (completionTokens) tags['openai.response.usage.completion_tokens'] = completionTokens
+ if (totalTokens) tags['openai.response.usage.total_tokens'] = totalTokens
}
function truncateApiKey (apiKey) {
diff --git a/packages/datadog-plugin-openai/test/index.spec.js b/packages/datadog-plugin-openai/test/index.spec.js
index dedb585d550..cdbcb72b969 100644
--- a/packages/datadog-plugin-openai/test/index.spec.js
+++ b/packages/datadog-plugin-openai/test/index.spec.js
@@ -12,12 +12,9 @@ const agent = require('../../dd-trace/test/plugins/agent')
const { DogStatsDClient } = require('../../dd-trace/src/dogstatsd')
const { NoopExternalLogger } = require('../../dd-trace/src/external-logger/src')
const Sampler = require('../../dd-trace/src/sampler')
-const { DD_MAJOR } = require('../../../version')
const tracerRequirePath = '../../dd-trace'
-const VERSIONS_TO_TEST = DD_MAJOR >= 4 ? '>=3' : '>=3 <4'
-
describe('Plugin', () => {
let openai
let clock
@@ -26,7 +23,7 @@ describe('Plugin', () => {
let realVersion
describe('openai', () => {
- withVersions('openai', 'openai', VERSIONS_TO_TEST, version => {
+ withVersions('openai', 'openai', version => {
const moduleRequirePath = `../../../versions/openai@${version}`
beforeEach(() => {
@@ -88,16 +85,73 @@ describe('Plugin', () => {
})
})
- describe('create completion', () => {
+ describe('with error', () => {
let scope
- after(() => {
+ beforeEach(() => {
+ scope = nock('https://api.openai.com:443')
+ .get('/v1/models')
+ .reply(400, {
+ error: {
+ message: 'fake message',
+ type: 'fake type',
+ param: 'fake param',
+ code: null
+ }
+ })
+ })
+
+ afterEach(() => {
nock.removeInterceptor(scope)
scope.done()
})
+ it('should attach the error to the span', async () => {
+ const checkTraces = agent
+ .use(traces => {
+ expect(traces[0][0]).to.have.property('error', 1)
+ // the message content differs across OpenAI versions, even between patches
+ expect(traces[0][0].meta['error.message']).to.exist
+ expect(traces[0][0].meta).to.have.property('error.type', 'Error')
+ expect(traces[0][0].meta['error.stack']).to.exist
+ })
+
+ try {
+ if (semver.satisfies(realVersion, '>=4.0.0')) {
+ await openai.models.list()
+ } else {
+ await openai.listModels()
+ }
+ } catch {
+ // ignore, we expect an error
+ }
+
+ await checkTraces
+
+ clock.tick(10 * 1000)
+
+ const expectedTags = ['error:1']
+
+ expect(metricStub).to.have.been.calledWith('openai.request.error', 1, 'c', expectedTags)
+ expect(metricStub).to.have.been.calledWith('openai.request.duration') // timing value not guaranteed
+
+ expect(metricStub).to.not.have.been.calledWith('openai.tokens.prompt')
+ expect(metricStub).to.not.have.been.calledWith('openai.tokens.completion')
+ expect(metricStub).to.not.have.been.calledWith('openai.tokens.total')
+ expect(metricStub).to.not.have.been.calledWith('openai.ratelimit.requests')
+ expect(metricStub).to.not.have.been.calledWith('openai.ratelimit.tokens')
+ expect(metricStub).to.not.have.been.calledWith('openai.ratelimit.remaining.requests')
+ expect(metricStub).to.not.have.been.calledWith('openai.ratelimit.remaining.tokens')
+ })
+ })
+
+ describe('create completion', () => {
+ afterEach(() => {
+ nock.cleanAll()
+ })
+
it('makes a successful call', async () => {
- scope = nock('https://api.openai.com:443')
+ nock('https://api.openai.com:443')
.post('/v1/completions')
.reply(200, {
id: 'cmpl-7GWDlQbOrAYGmeFZtoRdOEjDXDexM',
@@ -202,10 +256,10 @@ describe('Plugin', () => {
clock.tick(10 * 1000)
const expectedTags = [
+ 'error:0',
'org:kill-9',
'endpoint:/v1/completions',
- 'model:text-davinci-002',
- 'error:0'
+ 'model:text-davinci-002'
]
expect(metricStub).to.have.been.calledWith('openai.request.duration') // timing value not guaranteed
@@ -236,7 +290,7 @@ describe('Plugin', () => {
})
it('should not throw with empty response body', async () => {
- scope = nock('https://api.openai.com:443')
+ nock('https://api.openai.com:443')
.post('/v1/completions')
.reply(200, {}, [
'Date', 'Mon, 15 May 2023 17:24:22 GMT',
@@ -264,8 +318,7 @@ describe('Plugin', () => {
const params = {
model: 'text-davinci-002',
prompt: 'Hello, ',
- suffix: 'foo',
- stream: true
+ suffix: 'foo'
}
if (semver.satisfies(realVersion, '>=4.0.0')) {
@@ -280,11 +333,13 @@ describe('Plugin', () => {
})
})
- describe('create embedding', () => {
- let scope
+ describe('create embedding with stream:true', () => {
+ after(() => {
+ nock.cleanAll()
+ })
- before(() => {
- scope = nock('https://api.openai.com:443')
+ it('makes a successful call', async () => {
+ nock('https://api.openai.com:443')
.post('/v1/embeddings')
.reply(200, {
object: 'list',
@@ -307,14 +362,80 @@ describe('Plugin', () => {
'openai-processing-ms', '344',
'openai-version', '2020-10-01'
])
- })
- after(() => {
- nock.removeInterceptor(scope)
- scope.done()
+ const checkTraces = agent
+ .use(traces => {
+ expect(traces[0][0]).to.have.property('name', 'openai.request')
+ expect(traces[0][0]).to.have.property('type', 'openai')
+ if (semver.satisfies(realVersion, '>=4.0.0')) {
+ expect(traces[0][0]).to.have.property('resource', 'embeddings.create')
+ } else {
+ expect(traces[0][0]).to.have.property('resource', 'createEmbedding')
+ }
+ expect(traces[0][0]).to.have.property('error', 0)
+ expect(traces[0][0].meta).to.have.property('openai.request.endpoint', '/v1/embeddings')
+ expect(traces[0][0].meta).to.have.property('openai.request.method', 'POST')
+
+ expect(traces[0][0].meta).to.have.property('openai.organization.name', 'kill-9')
+ expect(traces[0][0].meta).to.have.property('openai.request.input', 'Cat?')
+ expect(traces[0][0].meta).to.have.property('openai.request.model', 'text-embedding-ada-002')
+ expect(traces[0][0].meta).to.have.property('openai.request.user', 'hunter2')
+ expect(traces[0][0].meta).to.have.property('openai.response.model', 'text-embedding-ada-002-v2')
+ expect(traces[0][0].metrics).to.have.property('openai.response.embeddings_count', 1)
+ expect(traces[0][0].metrics).to.have.property('openai.response.embedding.0.embedding_length', 2)
+ expect(traces[0][0].metrics).to.have.property('openai.response.usage.prompt_tokens', 2)
+ expect(traces[0][0].metrics).to.have.property('openai.response.usage.total_tokens', 2)
+ })
+
+ const params = {
+ model: 'text-embedding-ada-002',
+ input: 'Cat?',
+ user: 'hunter2'
+ }
+
+ if (semver.satisfies(realVersion, '>=4.0.0')) {
+ const result = await openai.embeddings.create(params)
+ expect(result.model).to.eql('text-embedding-ada-002-v2')
+ } else {
+ const result = await openai.createEmbedding(params)
+ expect(result.data.model).to.eql('text-embedding-ada-002-v2')
+ }
+
+ await checkTraces
+
+ expect(externalLoggerStub).to.have.been.calledWith({
+ status: 'info',
+ message: semver.satisfies(realVersion, '>=4.0.0') ? 'sampled embeddings.create' : 'sampled createEmbedding',
+ input: 'Cat?'
+ })
})
- it('makes a successful call', async () => {
+ it('makes a successful call with stream true', async () => {
+ // Testing that adding stream:true to the params doesn't break the instrumentation
+ nock('https://api.openai.com:443')
+ .post('/v1/embeddings')
+ .reply(200, {
+ object: 'list',
+ data: [{
+ object: 'embedding',
+ index: 0,
+ embedding: [-0.0034387498, -0.026400521]
+ }],
+ model: 'text-embedding-ada-002-v2',
+ usage: {
+ prompt_tokens: 2,
+ total_tokens: 2
+ }
+ }, [
+ 'Date', 'Mon, 15 May 2023 20:49:06 GMT',
+ 'Content-Type', 'application/json',
+ 'Content-Length', '75',
+ 'access-control-allow-origin', '*',
+ 'openai-organization', 'kill-9',
+ 'openai-processing-ms', '344',
+ 'openai-version', '2020-10-01'
+ ])
+
const checkTraces = agent
.use(traces => {
expect(traces[0][0]).to.have.property('name', 'openai.request')
@@ -342,7 +463,8 @@ describe('Plugin', () => {
const params = {
model: 'text-embedding-ada-002',
input: 'Cat?',
- user: 'hunter2'
+ user: 'hunter2',
+ stream: true
}
if (semver.satisfies(realVersion, '>=4.0.0')) {
@@ -638,10 +760,10 @@ describe('Plugin', () => {
await checkTraces
const expectedTags = [
+ 'error:0',
'org:kill-9',
'endpoint:/v1/edits',
- 'model:text-davinci-edit:001',
- 'error:0'
+ 'model:text-davinci-edit:001'
]
expect(metricStub).to.be.calledWith('openai.ratelimit.requests', 20, 'g', expectedTags)
@@ -2639,9 +2761,10 @@ describe('Plugin', () => {
const checkTraces = agent
.use(traces => {
expect(traces[0][0].meta)
- .to.have.property('openai.response.choices.0.message.tool_calls.0.name', 'extract_fictional_info')
+ .to.have.property('openai.response.choices.0.message.tool_calls.0.function.name',
+ 'extract_fictional_info')
expect(traces[0][0].meta)
- .to.have.property('openai.response.choices.0.message.tool_calls.0.arguments',
+ .to.have.property('openai.response.choices.0.message.tool_calls.0.function.arguments',
'{"name":"SpongeBob","origin":"Bikini Bottom"}')
expect(traces[0][0].meta).to.have.property('openai.response.choices.0.finish_reason', 'tool_calls')
})
@@ -2928,6 +3051,398 @@ describe('Plugin', () => {
})
})
}
+
+ if (semver.intersects('>4.1.0', version)) {
+ describe('streamed responses', () => {
+ afterEach(() => {
+ nock.cleanAll()
+ })
+
+ it('makes a successful chat completion call', async () => {
+ nock('https://api.openai.com:443')
+ .post('/v1/chat/completions')
+ .reply(200, function () {
+ return fs.createReadStream(Path.join(__dirname, 'streamed-responses/chat.completions.simple.txt'))
+ }, {
+ 'Content-Type': 'text/plain',
+ 'openai-organization': 'kill-9'
+ })
+
+ const checkTraces = agent
+ .use(traces => {
+ const span = traces[0][0]
+ expect(span).to.have.property('name', 'openai.request')
+ expect(span).to.have.property('type', 'openai')
+ expect(span).to.have.property('error', 0)
+ expect(span.meta).to.have.property('openai.organization.name', 'kill-9')
+ expect(span.meta).to.have.property('openai.request.method', 'POST')
+ expect(span.meta).to.have.property('openai.request.endpoint', '/v1/chat/completions')
+ expect(span.meta).to.have.property('openai.request.model', 'gpt-4o')
+ expect(span.meta).to.have.property('openai.request.messages.0.content',
+ 'Hello, OpenAI!')
+ expect(span.meta).to.have.property('openai.request.messages.0.role', 'user')
+ expect(span.meta).to.have.property('openai.request.messages.0.name', 'hunter2')
+ expect(span.meta).to.have.property('openai.response.choices.0.finish_reason', 'stop')
+ expect(span.meta).to.have.property('openai.response.choices.0.logprobs', 'returned')
+ expect(span.meta).to.have.property('openai.response.choices.0.message.role', 'assistant')
+ expect(span.meta).to.have.property('openai.response.choices.0.message.content',
+ 'Hello! How can I assist you today?')
+
+ // token metrics - these should be estimated counts
+ expect(span.metrics).to.have.property('openai.response.usage.prompt_tokens')
+ expect(span.metrics).to.have.property('openai.response.usage.prompt_tokens_estimated', 1)
+ expect(span.metrics).to.have.property('openai.response.usage.completion_tokens')
+ expect(span.metrics).to.have.property('openai.response.usage.completion_tokens_estimated', 1)
+ expect(span.metrics).to.have.property('openai.response.usage.total_tokens')
+ })
+
+ const stream = await openai.chat.completions.create({
+ model: 'gpt-4o',
+ messages: [{ role: 'user', content: 'Hello, OpenAI!', name: 'hunter2' }],
+ temperature: 0.5,
+ stream: true
+ })
+
+ for await (const part of stream) {
+ expect(part).to.have.property('choices')
+ expect(part.choices[0]).to.have.property('delta')
+ }
+
+ await checkTraces
+
+ expect(metricStub).to.have.been.calledWith('openai.tokens.prompt')
+ expect(metricStub).to.have.been.calledWith('openai.tokens.completion')
+ expect(metricStub).to.have.been.calledWith('openai.tokens.total')
+ })
+
+ it('makes a successful chat completion call with empty stream', async () => {
+ nock('https://api.openai.com:443')
+ .post('/v1/chat/completions')
+ .reply(200, function () {
+ return fs.createReadStream(Path.join(__dirname, 'streamed-responses/chat.completions.empty.txt'))
+ }, {
+ 'Content-Type': 'text/plain',
+ 'openai-organization': 'kill-9'
+ })
+
+ const checkTraces = agent
+ .use(traces => {
+ const span = traces[0][0]
+ expect(span).to.have.property('name', 'openai.request')
+ expect(span).to.have.property('type', 'openai')
+ expect(span).to.have.property('error', 0)
+ expect(span.meta).to.have.property('openai.organization.name', 'kill-9')
+ expect(span.meta).to.have.property('openai.request.method', 'POST')
+ expect(span.meta).to.have.property('openai.request.endpoint', '/v1/chat/completions')
+ expect(span.meta).to.have.property('openai.request.model', 'gpt-4o')
+ expect(span.meta).to.have.property('openai.request.messages.0.content', 'Hello, OpenAI!')
+ expect(span.meta).to.have.property('openai.request.messages.0.role', 'user')
+ expect(span.meta).to.have.property('openai.request.messages.0.name', 'hunter2')
+ })
+
+ const stream = await openai.chat.completions.create({
+ model: 'gpt-4o',
+ messages: [{ role: 'user', content: 'Hello, OpenAI!', name: 'hunter2' }],
+ temperature: 0.5,
+ stream: true
+ })
+
+ for await (const part of stream) {
+ expect(part).to.have.property('choices')
+ }
+
+ await checkTraces
+ })
+
+ it('makes a successful chat completion call with multiple choices', async () => {
+ nock('https://api.openai.com:443')
+ .post('/v1/chat/completions')
+ .reply(200, function () {
+ return fs.createReadStream(Path.join(__dirname, 'streamed-responses/chat.completions.multiple.txt'))
+ }, {
+ 'Content-Type': 'text/plain',
+ 'openai-organization': 'kill-9'
+ })
+
+ const checkTraces = agent
+ .use(traces => {
+ const span = traces[0][0]
+ expect(span).to.have.property('name', 'openai.request')
+ expect(span).to.have.property('type', 'openai')
+ expect(span).to.have.property('error', 0)
+ expect(span.meta).to.have.property('openai.organization.name', 'kill-9')
+ expect(span.meta).to.have.property('openai.request.method', 'POST')
+ expect(span.meta).to.have.property('openai.request.endpoint', '/v1/chat/completions')
+ expect(span.meta).to.have.property('openai.request.model', 'gpt-4')
+ expect(span.meta).to.have.property('openai.request.messages.0.content', 'How are you?')
+ expect(span.meta).to.have.property('openai.request.messages.0.role', 'user')
+ expect(span.meta).to.have.property('openai.request.messages.0.name', 'hunter2')
+
+ // message 0
+ expect(span.meta).to.have.property('openai.response.choices.0.finish_reason', 'stop')
+ expect(span.meta).to.have.property('openai.response.choices.0.logprobs', 'returned')
+ expect(span.meta).to.have.property('openai.response.choices.0.message.role', 'assistant')
+ expect(span.meta).to.have.property('openai.response.choices.0.message.content',
+ 'As an AI, I don\'t have feelings, but I\'m here to assist you. How can I help you today?'
+ )
+
+ // message 1
+ expect(span.meta).to.have.property('openai.response.choices.1.finish_reason', 'stop')
+ expect(span.meta).to.have.property('openai.response.choices.1.logprobs', 'returned')
+ expect(span.meta).to.have.property('openai.response.choices.1.message.role', 'assistant')
+ expect(span.meta).to.have.property('openai.response.choices.1.message.content',
+ 'I\'m just a computer program so I don\'t have feelings, ' +
+ 'but I\'m here and ready to help you with anything you need. How can I assis...'
+ )
+
+ // message 2
+ expect(span.meta).to.have.property('openai.response.choices.2.finish_reason', 'stop')
+ expect(span.meta).to.have.property('openai.response.choices.2.logprobs', 'returned')
+ expect(span.meta).to.have.property('openai.response.choices.2.message.role', 'assistant')
+ expect(span.meta).to.have.property('openai.response.choices.2.message.content',
+ 'I\'m just a computer program, so I don\'t have feelings like humans do. ' +
+ 'I\'m here and ready to assist you with any questions or tas...'
+ )
+ })
+
+ const stream = await openai.chat.completions.create({
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'How are you?', name: 'hunter2' }],
+ stream: true,
+ n: 3
+ })
+
+ for await (const part of stream) {
+ expect(part).to.have.property('choices')
+ expect(part.choices[0]).to.have.property('delta')
+ }
+
+ await checkTraces
+ })
+
+ it('makes a successful chat completion call with usage included', async () => {
+ nock('https://api.openai.com:443')
+ .post('/v1/chat/completions')
+ .reply(200, function () {
+ return fs.createReadStream(Path.join(__dirname, 'streamed-responses/chat.completions.simple.usage.txt'))
+ }, {
+ 'Content-Type': 'text/plain',
+ 'openai-organization': 'kill-9'
+ })
+
+ const checkTraces = agent
+ .use(traces => {
+ const span = traces[0][0]
+
+ expect(span.meta).to.have.property('openai.response.choices.0.message.content', 'I\'m just a computer')
+ expect(span.metrics).to.have.property('openai.response.usage.prompt_tokens', 11)
+ expect(span.metrics).to.have.property('openai.response.usage.completion_tokens', 5)
+ expect(span.metrics).to.have.property('openai.response.usage.total_tokens', 16)
+ })
+
+ const stream = await openai.chat.completions.create({
+ model: 'gpt-3.5-turbo',
+ messages: [{ role: 'user', content: 'How are you?', name: 'hunter2' }],
+ max_tokens: 5,
+ stream: true,
+ stream_options: {
+ include_usage: true
+ }
+ })
+
+ for await (const part of stream) {
+ expect(part).to.have.property('choices')
+ }
+
+ await checkTraces
+
+ const expectedTags = [
+ 'error:0',
+ 'org:kill-9',
+ 'endpoint:/v1/chat/completions',
+ 'model:gpt-3.5-turbo-0125'
+ ]
+
+ expect(metricStub).to.have.been.calledWith('openai.tokens.prompt', 11, 'd', expectedTags)
+ expect(metricStub).to.have.been.calledWith('openai.tokens.completion', 5, 'd', expectedTags)
+ expect(metricStub).to.have.been.calledWith('openai.tokens.total', 16, 'd', expectedTags)
+ })
+
+ it('makes a successful completion call', async () => {
+ nock('https://api.openai.com:443')
+ .post('/v1/completions')
+ .reply(200, function () {
+ return fs.createReadStream(Path.join(__dirname, 'streamed-responses/completions.simple.txt'))
+ }, {
+ 'Content-Type': 'text/plain',
+ 'openai-organization': 'kill-9'
+ })
+
+ const checkTraces = agent
+ .use(traces => {
+ const span = traces[0][0]
+
+ expect(span).to.have.property('name', 'openai.request')
+ expect(span).to.have.property('type', 'openai')
+ expect(span).to.have.property('error', 0)
+ expect(span.meta).to.have.property('openai.organization.name', 'kill-9')
+ expect(span.meta).to.have.property('openai.request.method', 'POST')
+ expect(span.meta).to.have.property('openai.request.endpoint', '/v1/completions')
+ expect(span.meta).to.have.property('openai.request.model', 'gpt-4o')
+ expect(span.meta).to.have.property('openai.request.prompt', 'Hello, OpenAI!')
+ expect(span.meta).to.have.property('openai.response.choices.0.finish_reason', 'stop')
+ expect(span.meta).to.have.property('openai.response.choices.0.logprobs', 'returned')
+ expect(span.meta).to.have.property('openai.response.choices.0.text', ' this is a test.')
+
+ // token metrics - the streamed response carries no usage block, so these should be estimated counts
+ expect(span.metrics).to.have.property('openai.response.usage.prompt_tokens')
+ expect(span.metrics).to.have.property('openai.response.usage.prompt_tokens_estimated', 1)
+ expect(span.metrics).to.have.property('openai.response.usage.completion_tokens')
+ expect(span.metrics).to.have.property('openai.response.usage.completion_tokens_estimated', 1)
+ expect(span.metrics).to.have.property('openai.response.usage.total_tokens')
+ expect(span.metrics).to.have.property('openai.response.usage.total_tokens_estimated', 1)
+ })
+
+ const stream = await openai.completions.create({
+ model: 'gpt-4o',
+ prompt: 'Hello, OpenAI!',
+ temperature: 0.5,
+ stream: true
+ })
+
+ for await (const part of stream) {
+ expect(part).to.have.property('choices')
+ expect(part.choices[0]).to.have.property('text')
+ }
+
+ await checkTraces
+
+ expect(metricStub).to.have.been.calledWith('openai.tokens.prompt')
+ expect(metricStub).to.have.been.calledWith('openai.tokens.completion')
+ expect(metricStub).to.have.been.calledWith('openai.tokens.total')
+ })
+
+ it('makes a successful completion call with usage included', async () => {
+ nock('https://api.openai.com:443')
+ .post('/v1/completions')
+ .reply(200, function () {
+ return fs.createReadStream(Path.join(__dirname, 'streamed-responses/completions.simple.usage.txt'))
+ }, {
+ 'Content-Type': 'text/plain',
+ 'openai-organization': 'kill-9'
+ })
+
+ const checkTraces = agent
+ .use(traces => {
+ const span = traces[0][0]
+
+ expect(span.meta).to.have.property('openai.response.choices.0.text', '\\n\\nI am an AI')
+ expect(span.metrics).to.have.property('openai.response.usage.prompt_tokens', 4)
+ expect(span.metrics).to.have.property('openai.response.usage.completion_tokens', 5)
+ expect(span.metrics).to.have.property('openai.response.usage.total_tokens', 9)
+ })
+
+ const stream = await openai.completions.create({
+ model: 'gpt-3.5-turbo-instruct',
+ prompt: 'How are you?',
+ stream: true,
+ max_tokens: 5,
+ stream_options: {
+ include_usage: true
+ }
+ })
+
+ for await (const part of stream) {
+ expect(part).to.have.property('choices')
+ }
+
+ await checkTraces
+
+ const expectedTags = [
+ 'error:0',
+ 'org:kill-9',
+ 'endpoint:/v1/completions',
+ 'model:gpt-3.5-turbo-instruct'
+ ]
+
+ expect(metricStub).to.have.been.calledWith('openai.tokens.prompt', 4, 'd', expectedTags)
+ expect(metricStub).to.have.been.calledWith('openai.tokens.completion', 5, 'd', expectedTags)
+ expect(metricStub).to.have.been.calledWith('openai.tokens.total', 9, 'd', expectedTags)
+ })
+
+ if (semver.intersects('>4.16.0', version)) {
+ it('makes a successful chat completion call with tools', async () => {
+ nock('https://api.openai.com:443')
+ .post('/v1/chat/completions')
+ .reply(200, function () {
+ return fs.createReadStream(Path.join(__dirname, 'streamed-responses/chat.completions.tools.txt'))
+ }, {
+ 'Content-Type': 'text/plain',
+ 'openai-organization': 'kill-9'
+ })
+
+ const checkTraces = agent
+ .use(traces => {
+ const span = traces[0][0]
+
+ expect(span).to.have.property('name', 'openai.request')
+ expect(span).to.have.property('type', 'openai')
+ expect(span).to.have.property('error', 0)
+ expect(span.meta).to.have.property('openai.organization.name', 'kill-9')
+ expect(span.meta).to.have.property('openai.request.method', 'POST')
+ expect(span.meta).to.have.property('openai.request.endpoint', '/v1/chat/completions')
+ expect(span.meta).to.have.property('openai.request.model', 'gpt-4')
+ expect(span.meta).to.have.property('openai.request.messages.0.content', 'Hello, OpenAI!')
+ expect(span.meta).to.have.property('openai.request.messages.0.role', 'user')
+ expect(span.meta).to.have.property('openai.request.messages.0.name', 'hunter2')
+ expect(span.meta).to.have.property('openai.response.choices.0.finish_reason', 'tool_calls')
+ expect(span.meta).to.have.property('openai.response.choices.0.logprobs', 'returned')
+ expect(span.meta).to.have.property('openai.response.choices.0.message.role', 'assistant')
+ expect(span.meta).to.have.property('openai.response.choices.0.message.tool_calls.0.function.name',
+ 'get_current_weather')
+ })
+
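+ // tool definition matching the get_current_weather call replayed by the fixture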
+ const tools = [
+ {
+ type: 'function',
+ function: {
+ name: 'get_current_weather',
+ description: 'Get the current weather in a given location',
+ parameters: {
+ type: 'object',
+ properties: {
+ location: {
+ type: 'string',
+ description: 'The city and state, e.g. San Francisco, CA'
+ },
+ unit: { type: 'string', enum: ['celsius', 'fahrenheit'] }
+ },
+ required: ['location']
+ }
+ }
+ }
+ ]
+
+ const stream = await openai.chat.completions.create({
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Hello, OpenAI!', name: 'hunter2' }],
+ temperature: 0.5,
+ tools,
+ tool_choice: 'auto',
+ stream: true
+ })
+
+ for await (const part of stream) {
+ expect(part).to.have.property('choices')
+ expect(part.choices[0]).to.have.property('delta')
+ }
+
+ await checkTraces
+ })
+ }
+ })
+ }
})
})
})
diff --git a/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.empty.txt b/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.empty.txt
new file mode 100644
index 00000000000..e1c6271262e
--- /dev/null
+++ b/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.empty.txt
@@ -0,0 +1,5 @@
+data: {"id":"chatcmpl-9S9XTKSaDNOTtVqvF2hAbdu4UGYQa","object":"chat.completion.chunk","created":1716496879,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S9XTKSaDNOTtVqvF2hAbdu4UGYQa","object":"chat.completion.chunk","created":1716496879,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"length"}]}
+
+data: [DONE]
\ No newline at end of file
diff --git a/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.multiple.txt b/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.multiple.txt
new file mode 100644
index 00000000000..273475c39f9
--- /dev/null
+++ b/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.multiple.txt
@@ -0,0 +1,212 @@
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"As"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" an"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"'m"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":"'m"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" AI"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" just"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" just"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" computer"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" computer"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" don"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" program"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" program"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"'t"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" so"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" have"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" so"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" feelings"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" don"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" don"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" but"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"'t"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" have"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" feelings"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" but"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":"'t"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" have"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" feelings"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" like"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"'m"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" here"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" to"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" humans"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" do"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" assist"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" you"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"'m"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" here"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" and"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" How"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":"'m"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" ready"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" to"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" here"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" and"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" help"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" help"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" you"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" ready"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" with"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" to"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" anything"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" assist"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" you"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" today"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" you"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" you"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" need"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" with"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" any"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" How"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" questions"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" or"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" tasks"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" assist"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" you"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" you"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" have"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":" today"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" How"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" help"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" you"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":" today"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":1,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9UI1IdBePvsMlmjSRj8LRojnwpm9f","object":"chat.completion.chunk","created":1717006136,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":2,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: [DONE]
+
diff --git a/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.simple.txt b/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.simple.txt
new file mode 100644
index 00000000000..42bd5f10a6c
--- /dev/null
+++ b/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.simple.txt
@@ -0,0 +1,24 @@
+data: {"id":"chatcmpl-9EHaxj6mPeGvYIJ6lSzaTIcjSfDIi","object":"chat.completion.chunk","created":1713191255,"model":"gpt-3.5-turbo-0125","system_fingerprint":"fp_c2295e73ad","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9EHaxj6mPeGvYIJ6lSzaTIcjSfDIi","object":"chat.completion.chunk","created":1713191255,"model":"gpt-3.5-turbo-0125","system_fingerprint":"fp_c2295e73ad","choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9EHaxj6mPeGvYIJ6lSzaTIcjSfDIi","object":"chat.completion.chunk","created":1713191255,"model":"gpt-3.5-turbo-0125","system_fingerprint":"fp_c2295e73ad","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9EHaxj6mPeGvYIJ6lSzaTIcjSfDIi","object":"chat.completion.chunk","created":1713191255,"model":"gpt-3.5-turbo-0125","system_fingerprint":"fp_c2295e73ad","choices":[{"index":0,"delta":{"content":" How"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9EHaxj6mPeGvYIJ6lSzaTIcjSfDIi","object":"chat.completion.chunk","created":1713191255,"model":"gpt-3.5-turbo-0125","system_fingerprint":"fp_c2295e73ad","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9EHaxj6mPeGvYIJ6lSzaTIcjSfDIi","object":"chat.completion.chunk","created":1713191255,"model":"gpt-3.5-turbo-0125","system_fingerprint":"fp_c2295e73ad","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9EHaxj6mPeGvYIJ6lSzaTIcjSfDIi","object":"chat.completion.chunk","created":1713191255,"model":"gpt-3.5-turbo-0125","system_fingerprint":"fp_c2295e73ad","choices":[{"index":0,"delta":{"content":" assist"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9EHaxj6mPeGvYIJ6lSzaTIcjSfDIi","object":"chat.completion.chunk","created":1713191255,"model":"gpt-3.5-turbo-0125","system_fingerprint":"fp_c2295e73ad","choices":[{"index":0,"delta":{"content":" you"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9EHaxj6mPeGvYIJ6lSzaTIcjSfDIi","object":"chat.completion.chunk","created":1713191255,"model":"gpt-3.5-turbo-0125","system_fingerprint":"fp_c2295e73ad","choices":[{"index":0,"delta":{"content":" today"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9EHaxj6mPeGvYIJ6lSzaTIcjSfDIi","object":"chat.completion.chunk","created":1713191255,"model":"gpt-3.5-turbo-0125","system_fingerprint":"fp_c2295e73ad","choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9EHaxj6mPeGvYIJ6lSzaTIcjSfDIi","object":"chat.completion.chunk","created":1713191255,"model":"gpt-3.5-turbo-0125","system_fingerprint":"fp_c2295e73ad","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: [DONE]
+
diff --git a/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.simple.usage.txt b/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.simple.usage.txt
new file mode 100644
index 00000000000..83af1ac9a65
--- /dev/null
+++ b/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.simple.usage.txt
@@ -0,0 +1,17 @@
+data: {"id":"chatcmpl-9V17JDcZNGGQucdBOBKsQ7LQ5jyLk","object":"chat.completion.chunk","created":1717179489,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}],"usage":null}
+
+data: {"id":"chatcmpl-9V17JDcZNGGQucdBOBKsQ7LQ5jyLk","object":"chat.completion.chunk","created":1717179489,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}],"usage":null}
+
+data: {"id":"chatcmpl-9V17JDcZNGGQucdBOBKsQ7LQ5jyLk","object":"chat.completion.chunk","created":1717179489,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"'m"},"logprobs":null,"finish_reason":null}],"usage":null}
+
+data: {"id":"chatcmpl-9V17JDcZNGGQucdBOBKsQ7LQ5jyLk","object":"chat.completion.chunk","created":1717179489,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" just"},"logprobs":null,"finish_reason":null}],"usage":null}
+
+data: {"id":"chatcmpl-9V17JDcZNGGQucdBOBKsQ7LQ5jyLk","object":"chat.completion.chunk","created":1717179489,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}],"usage":null}
+
+data: {"id":"chatcmpl-9V17JDcZNGGQucdBOBKsQ7LQ5jyLk","object":"chat.completion.chunk","created":1717179489,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" computer"},"logprobs":null,"finish_reason":null}],"usage":null}
+
+data: {"id":"chatcmpl-9V17JDcZNGGQucdBOBKsQ7LQ5jyLk","object":"chat.completion.chunk","created":1717179489,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"length"}],"usage":null}
+
+data: {"id":"chatcmpl-9V17JDcZNGGQucdBOBKsQ7LQ5jyLk","object":"chat.completion.chunk","created":1717179489,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[],"usage":{"prompt_tokens":11,"completion_tokens":5,"total_tokens":16}}
+
+data: [DONE]
\ No newline at end of file
diff --git a/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.tools.txt b/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.tools.txt
new file mode 100644
index 00000000000..f9b7a3a102b
--- /dev/null
+++ b/packages/datadog-plugin-openai/test/streamed-responses/chat.completions.tools.txt
@@ -0,0 +1,29 @@
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_2K7z4ywTEOTuDpFgpVTs3vCF","type":"function","function":{"name":"get_current_weather","arguments":""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\n"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" "}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" \""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"location"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" \""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"San"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Francisco"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":","}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" CA"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"\n"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"}"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9S8QJE6AyQxIFu9lf1U3VGOWWEkmA","object":"chat.completion.chunk","created":1716492591,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}
+
+data: [DONE]
\ No newline at end of file
diff --git a/packages/datadog-plugin-openai/test/streamed-responses/completions.simple.txt b/packages/datadog-plugin-openai/test/streamed-responses/completions.simple.txt
new file mode 100644
index 00000000000..38b54feeac2
--- /dev/null
+++ b/packages/datadog-plugin-openai/test/streamed-responses/completions.simple.txt
@@ -0,0 +1,15 @@
+data: {"id":"cmpl-9SBFwkdjMAXO6n7Z0cz7ScN1SKJSr","object":"text_completion","created":1716503480,"choices":[{"text":" ","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}
+
+data: {"id":"cmpl-9SBFwkdjMAXO6n7Z0cz7ScN1SKJSr","object":"text_completion","created":1716503480,"choices":[{"text":"this","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}
+
+data: {"id":"cmpl-9SBFwkdjMAXO6n7Z0cz7ScN1SKJSr","object":"text_completion","created":1716503480,"choices":[{"text":" is","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}
+
+data: {"id":"cmpl-9SBFwkdjMAXO6n7Z0cz7ScN1SKJSr","object":"text_completion","created":1716503480,"choices":[{"text":" a","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}
+
+data: {"id":"cmpl-9SBFwkdjMAXO6n7Z0cz7ScN1SKJSr","object":"text_completion","created":1716503480,"choices":[{"text":" test","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}
+
+data: {"id":"cmpl-9SBFwkdjMAXO6n7Z0cz7ScN1SKJSr","object":"text_completion","created":1716503480,"choices":[{"text":".","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}
+
+data: {"id":"cmpl-9SBFwkdjMAXO6n7Z0cz7ScN1SKJSr","object":"text_completion","created":1716503480,"choices":[{"text":"","index":0,"logprobs":null,"finish_reason":"stop"}],"model":"gpt-3.5-turbo-instruct"}
+
+data: [DONE]
\ No newline at end of file
diff --git a/packages/datadog-plugin-openai/test/streamed-responses/completions.simple.usage.txt b/packages/datadog-plugin-openai/test/streamed-responses/completions.simple.usage.txt
new file mode 100644
index 00000000000..4b2014266aa
--- /dev/null
+++ b/packages/datadog-plugin-openai/test/streamed-responses/completions.simple.usage.txt
@@ -0,0 +1,15 @@
+data: {"id":"cmpl-9V1c0TcDX8x5tpPWvYvWtW4PxHGmB","object":"text_completion","created":1717181392,"choices":[{"text":"\n\n","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct","usage":null}
+
+data: {"id":"cmpl-9V1c0TcDX8x5tpPWvYvWtW4PxHGmB","object":"text_completion","created":1717181392,"choices":[{"text":"I","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct","usage":null}
+
+data: {"id":"cmpl-9V1c0TcDX8x5tpPWvYvWtW4PxHGmB","object":"text_completion","created":1717181392,"choices":[{"text":" am","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct","usage":null}
+
+data: {"id":"cmpl-9V1c0TcDX8x5tpPWvYvWtW4PxHGmB","object":"text_completion","created":1717181392,"choices":[{"text":" an","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct","usage":null}
+
+data: {"id":"cmpl-9V1c0TcDX8x5tpPWvYvWtW4PxHGmB","object":"text_completion","created":1717181392,"choices":[{"text":" AI","index":0,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct","usage":null}
+
+data: {"id":"cmpl-9V1c0TcDX8x5tpPWvYvWtW4PxHGmB","object":"text_completion","created":1717181392,"choices":[{"text":"","index":0,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct","usage":null}
+
+data: {"id":"cmpl-9V1c0TcDX8x5tpPWvYvWtW4PxHGmB","object":"text_completion","created":1717181392,"model":"gpt-3.5-turbo-instruct","usage":{"prompt_tokens":4,"completion_tokens":5,"total_tokens":9},"choices":[]}
+
+data: [DONE]
\ No newline at end of file
diff --git a/packages/dd-trace/src/appsec/addresses.js b/packages/dd-trace/src/appsec/addresses.js
index c2352f14a61..086052218fd 100644
--- a/packages/dd-trace/src/appsec/addresses.js
+++ b/packages/dd-trace/src/appsec/addresses.js
@@ -15,10 +15,12 @@ module.exports = {
HTTP_INCOMING_GRAPHQL_RESOLVERS: 'graphql.server.all_resolvers',
HTTP_INCOMING_GRAPHQL_RESOLVER: 'graphql.server.resolver',
- HTTP_OUTGOING_BODY: 'server.response.body',
+ HTTP_INCOMING_RESPONSE_BODY: 'server.response.body',
HTTP_CLIENT_IP: 'http.client_ip',
USER_ID: 'usr.id',
- WAF_CONTEXT_PROCESSOR: 'waf.context.processor'
+ WAF_CONTEXT_PROCESSOR: 'waf.context.processor',
+
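+ // outgoing request URL, used by RASP for SSRF detection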
+ HTTP_OUTGOING_URL: 'server.io.net.url'
}
diff --git a/packages/dd-trace/src/appsec/blocking.js b/packages/dd-trace/src/appsec/blocking.js
index 65812357614..141667b9d57 100644
--- a/packages/dd-trace/src/appsec/blocking.js
+++ b/packages/dd-trace/src/appsec/blocking.js
@@ -8,7 +8,6 @@ const detectedSpecificEndpoints = {}
let templateHtml = blockedTemplates.html
let templateJson = blockedTemplates.json
let templateGraphqlJson = blockedTemplates.graphqlJson
-let blockingConfiguration
const specificBlockingTypes = {
GRAPHQL: 'graphql'
@@ -22,13 +21,13 @@ function addSpecificEndpoint (method, url, type) {
detectedSpecificEndpoints[getSpecificKey(method, url)] = type
}
-function getBlockWithRedirectData (rootSpan) {
- let statusCode = blockingConfiguration.parameters.status_code
+function getBlockWithRedirectData (rootSpan, actionParameters) {
+ let statusCode = actionParameters.status_code
if (!statusCode || statusCode < 300 || statusCode >= 400) {
statusCode = 303
}
const headers = {
- Location: blockingConfiguration.parameters.location
+ Location: actionParameters.location
}
rootSpan.addTags({
@@ -48,10 +47,9 @@ function getSpecificBlockingData (type) {
}
}
-function getBlockWithContentData (req, specificType, rootSpan) {
+function getBlockWithContentData (req, specificType, rootSpan, actionParameters) {
let type
let body
- let statusCode
const specificBlockingType = specificType || detectedSpecificEndpoints[getSpecificKey(req.method, req.url)]
if (specificBlockingType) {
@@ -64,7 +62,7 @@ function getBlockWithContentData (req, specificType, rootSpan) {
// parse the Accept header, ex: Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8
const accept = req.headers.accept?.split(',').map((str) => str.split(';', 1)[0].trim())
- if (!blockingConfiguration || blockingConfiguration.parameters.type === 'auto') {
+ if (!actionParameters || actionParameters.type === 'auto') {
if (accept?.includes('text/html') && !accept.includes('application/json')) {
type = 'text/html; charset=utf-8'
body = templateHtml
@@ -73,7 +71,7 @@ function getBlockWithContentData (req, specificType, rootSpan) {
body = templateJson
}
} else {
- if (blockingConfiguration.parameters.type === 'html') {
+ if (actionParameters.type === 'html') {
type = 'text/html; charset=utf-8'
body = templateHtml
} else {
@@ -83,11 +81,7 @@ function getBlockWithContentData (req, specificType, rootSpan) {
}
}
- if (blockingConfiguration?.type === 'block_request' && blockingConfiguration.parameters.status_code) {
- statusCode = blockingConfiguration.parameters.status_code
- } else {
- statusCode = 403
- }
+ const statusCode = actionParameters?.status_code || 403
const headers = {
'Content-Type': type,
@@ -101,27 +95,31 @@ function getBlockWithContentData (req, specificType, rootSpan) {
return { body, statusCode, headers }
}
-function getBlockingData (req, specificType, rootSpan) {
- if (blockingConfiguration?.type === 'redirect_request' && blockingConfiguration.parameters.location) {
- return getBlockWithRedirectData(rootSpan)
+function getBlockingData (req, specificType, rootSpan, actionParameters) {
+ if (actionParameters?.location) {
+ return getBlockWithRedirectData(rootSpan, actionParameters)
} else {
- return getBlockWithContentData(req, specificType, rootSpan)
+ return getBlockWithContentData(req, specificType, rootSpan, actionParameters)
}
}
-function block (req, res, rootSpan, abortController, type) {
+function block (req, res, rootSpan, abortController, actionParameters) {
if (res.headersSent) {
log.warn('Cannot send blocking response when headers have already been sent')
return
}
- const { body, headers, statusCode } = getBlockingData(req, type, rootSpan)
+ const { body, headers, statusCode } = getBlockingData(req, null, rootSpan, actionParameters)
res.writeHead(statusCode, headers).end(body)
abortController?.abort()
}
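+// returns the parameters of the block_request or redirect_request action from the WAF result, if any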
+function getBlockingAction (actions) {
+ return actions?.block_request || actions?.redirect_request
+}
+
function setTemplates (config) {
if (config.appsec.blockedTemplateHtml) {
templateHtml = config.appsec.blockedTemplateHtml
@@ -142,15 +140,11 @@ function setTemplates (config) {
}
}
-function updateBlockingConfiguration (newBlockingConfiguration) {
- blockingConfiguration = newBlockingConfiguration
-}
-
module.exports = {
addSpecificEndpoint,
block,
specificBlockingTypes,
getBlockingData,
- setTemplates,
- updateBlockingConfiguration
+ getBlockingAction,
+ setTemplates
}
diff --git a/packages/dd-trace/src/appsec/channels.js b/packages/dd-trace/src/appsec/channels.js
index fe4ce2fb881..57a3c29676c 100644
--- a/packages/dd-trace/src/appsec/channels.js
+++ b/packages/dd-trace/src/appsec/channels.js
@@ -17,5 +17,6 @@ module.exports = {
setCookieChannel: dc.channel('datadog:iast:set-cookie'),
nextBodyParsed: dc.channel('apm:next:body-parsed'),
nextQueryParsed: dc.channel('apm:next:query-parsed'),
- responseBody: dc.channel('datadog:express:response:json:start')
+ responseBody: dc.channel('datadog:express:response:json:start'),
+ httpClientRequestStart: dc.channel('apm:http:client:request:start')
}
diff --git a/packages/dd-trace/src/appsec/graphql.js b/packages/dd-trace/src/appsec/graphql.js
index 52a17acdf3a..ef957bebe0d 100644
--- a/packages/dd-trace/src/appsec/graphql.js
+++ b/packages/dd-trace/src/appsec/graphql.js
@@ -1,7 +1,12 @@
'use strict'
const { storage } = require('../../../datadog-core')
-const { addSpecificEndpoint, specificBlockingTypes, getBlockingData } = require('./blocking')
+const {
+ addSpecificEndpoint,
+ specificBlockingTypes,
+ getBlockingData,
+ getBlockingAction
+} = require('./blocking')
const waf = require('./waf')
const addresses = require('./addresses')
const web = require('../plugins/util/web')
@@ -32,10 +37,12 @@ function onGraphqlStartResolve ({ context, resolverInfo }) {
if (!resolverInfo || typeof resolverInfo !== 'object') return
const actions = waf.run({ ephemeral: { [addresses.HTTP_INCOMING_GRAPHQL_RESOLVER]: resolverInfo } }, req)
- if (actions?.includes('block')) {
+ const blockingAction = getBlockingAction(actions)
+ if (blockingAction) {
const requestData = graphqlRequestData.get(req)
if (requestData?.isInGraphqlRequest) {
requestData.blocked = true
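+ // keep the action parameters so the blocking response can be built when the GraphQL response is written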
+ requestData.wafAction = blockingAction
context?.abortController?.abort()
}
}
@@ -87,7 +94,7 @@ function beforeWriteApolloGraphqlResponse ({ abortController, abortData }) {
const rootSpan = web.root(req)
if (!rootSpan) return
- const blockingData = getBlockingData(req, specificBlockingTypes.GRAPHQL, rootSpan)
+ const blockingData = getBlockingData(req, specificBlockingTypes.GRAPHQL, rootSpan, requestData.wafAction)
abortData.statusCode = blockingData.statusCode
abortData.headers = blockingData.headers
abortData.message = blockingData.body
diff --git a/packages/dd-trace/src/appsec/index.js b/packages/dd-trace/src/appsec/index.js
index d6b17ea49b1..76e67a0ef72 100644
--- a/packages/dd-trace/src/appsec/index.js
+++ b/packages/dd-trace/src/appsec/index.js
@@ -22,10 +22,11 @@ const apiSecuritySampler = require('./api_security_sampler')
const web = require('../plugins/util/web')
const { extractIp } = require('../plugins/util/ip_extractor')
const { HTTP_CLIENT_IP } = require('../../../../ext/tags')
-const { block, setTemplates } = require('./blocking')
+const { block, setTemplates, getBlockingAction } = require('./blocking')
const { passportTrackEvent } = require('./passport')
const { storage } = require('../../../datadog-core')
const graphql = require('./graphql')
+const rasp = require('./rasp')
let isEnabled = false
let config
@@ -37,6 +38,10 @@ function enable (_config) {
appsecTelemetry.enable(_config.telemetry)
graphql.enable()
+ if (_config.appsec.rasp.enabled) {
+ rasp.enable()
+ }
+
setTemplates(_config)
RuleManager.loadRules(_config.appsec)
@@ -203,7 +208,7 @@ function onResponseBody ({ req, body }) {
// we don't support blocking at this point, so no results needed
waf.run({
persistent: {
- [addresses.HTTP_OUTGOING_BODY]: body
+ [addresses.HTTP_INCOMING_RESPONSE_BODY]: body
}
}, req)
}
@@ -223,8 +228,9 @@ function onPassportVerify ({ credentials, user }) {
function handleResults (actions, req, res, rootSpan, abortController) {
if (!actions || !req || !res || !rootSpan || !abortController) return
- if (actions.includes('block')) {
- block(req, res, rootSpan, abortController)
+ const blockingAction = getBlockingAction(actions)
+ if (blockingAction) {
+ block(req, res, rootSpan, abortController, blockingAction)
}
}
@@ -236,6 +242,7 @@ function disable () {
appsecTelemetry.disable()
graphql.disable()
+ rasp.disable()
remoteConfig.disableWafUpdate()
diff --git a/packages/dd-trace/src/appsec/rasp.js b/packages/dd-trace/src/appsec/rasp.js
new file mode 100644
index 00000000000..1a4873718b9
--- /dev/null
+++ b/packages/dd-trace/src/appsec/rasp.js
@@ -0,0 +1,35 @@
+'use strict'
+
+const { storage } = require('../../../datadog-core')
+const addresses = require('./addresses')
+const { httpClientRequestStart } = require('./channels')
+const waf = require('./waf')
+
+function enable () {
+ httpClientRequestStart.subscribe(analyzeSsrf)
+}
+
+function disable () {
+ if (httpClientRequestStart.hasSubscribers) httpClientRequestStart.unsubscribe(analyzeSsrf)
+}
+
+function analyzeSsrf (ctx) {
+ const store = storage.getStore()
+ const req = store?.req
+ const url = ctx.args.uri
+
+ if (!req || !url) return
+
+ const persistent = {
+ [addresses.HTTP_OUTGOING_URL]: url
+ }
+ // TODO: Currently this is only monitoring, we should
+ // block the request if SSRF attempt and
+ // generate stack traces
+ waf.run({ persistent }, req)
+}
+
+module.exports = {
+ enable,
+ disable
+}
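
A minimal sketch of how this new module is exercised, assuming only what the patch shows: the http client instrumentation publishes on the apm:http:client:request:start channel registered in channels.js, and analyzeSsrf picks up the active request from async storage.

    const dc = require('dc-polyfill')

    // Illustrative publish; the real payload comes from the http instrumentation.
    const httpClientRequestStart = dc.channel('apm:http:client:request:start')
    httpClientRequestStart.publish({
      args: { uri: 'http://169.254.169.254/latest/meta-data' } // classic SSRF target
    })
    // With a request in the async store, analyzeSsrf runs the WAF with
    // { [addresses.HTTP_OUTGOING_URL]: uri }, monitoring only for now.
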
diff --git a/packages/dd-trace/src/appsec/remote_config/capabilities.js b/packages/dd-trace/src/appsec/remote_config/capabilities.js
index 95e032a4dc3..61684e171f0 100644
--- a/packages/dd-trace/src/appsec/remote_config/capabilities.js
+++ b/packages/dd-trace/src/appsec/remote_config/capabilities.js
@@ -15,5 +15,6 @@ module.exports = {
APM_TRACING_LOGS_INJECTION: 1n << 13n,
APM_TRACING_HTTP_HEADER_TAGS: 1n << 14n,
APM_TRACING_CUSTOM_TAGS: 1n << 15n,
- APM_TRACING_ENABLED: 1n << 19n
+ APM_TRACING_ENABLED: 1n << 19n,
+ APM_TRACING_SAMPLE_RULES: 1n << 29n
}
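
Each capability is a single-bit BigInt flag, so a tracer advertises its capability set as the bitwise OR of the flags it supports. A worked example with the two flags involved here (how the mask is serialized on the wire is outside this diff):

    const APM_TRACING_ENABLED = 1n << 19n
    const APM_TRACING_SAMPLE_RULES = 1n << 29n

    const mask = APM_TRACING_ENABLED | APM_TRACING_SAMPLE_RULES
    console.log(mask.toString(2)) // '100000000010000000000000000000' (bits 29 and 19 set)
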
diff --git a/packages/dd-trace/src/appsec/remote_config/index.js b/packages/dd-trace/src/appsec/remote_config/index.js
index 58585c169bc..f39d02347eb 100644
--- a/packages/dd-trace/src/appsec/remote_config/index.js
+++ b/packages/dd-trace/src/appsec/remote_config/index.js
@@ -15,6 +15,7 @@ function enable (config, appsec) {
rc.updateCapabilities(RemoteConfigCapabilities.APM_TRACING_LOGS_INJECTION, true)
rc.updateCapabilities(RemoteConfigCapabilities.APM_TRACING_SAMPLE_RATE, true)
rc.updateCapabilities(RemoteConfigCapabilities.APM_TRACING_ENABLED, true)
+ rc.updateCapabilities(RemoteConfigCapabilities.APM_TRACING_SAMPLE_RULES, true)
const activation = Activation.fromConfig(config)
diff --git a/packages/dd-trace/src/appsec/rule_manager.js b/packages/dd-trace/src/appsec/rule_manager.js
index 3cbef3597e3..635b1c1acb5 100644
--- a/packages/dd-trace/src/appsec/rule_manager.js
+++ b/packages/dd-trace/src/appsec/rule_manager.js
@@ -3,7 +3,6 @@
const fs = require('fs')
const waf = require('./waf')
const { ACKNOWLEDGED, ERROR } = require('./remote_config/apply_states')
-const blocking = require('./blocking')
let defaultRules
@@ -20,10 +19,6 @@ function loadRules (config) {
: require('./recommended.json')
waf.init(defaultRules, config)
-
- if (defaultRules.actions) {
- blocking.updateBlockingConfiguration(defaultRules.actions.find(action => action.id === 'block'))
- }
}
function updateWafFromRC ({ toUnapply, toApply, toModify }) {
@@ -68,7 +63,7 @@ function updateWafFromRC ({ toUnapply, toApply, toModify }) {
item.apply_state = ERROR
item.apply_error = 'Multiple ruleset received in ASM_DD'
} else {
- if (file && file.rules && file.rules.length) {
+ if (file?.rules?.length) {
const { version, metadata, rules, processors, scanners } = file
newRuleset = { version, metadata, rules, processors, scanners }
@@ -78,30 +73,23 @@ function updateWafFromRC ({ toUnapply, toApply, toModify }) {
batch.add(item)
}
} else if (product === 'ASM') {
- let batchConfiguration = false
- if (file && file.rules_override && file.rules_override.length) {
- batchConfiguration = true
+ if (file?.rules_override?.length) {
newRulesOverride.set(id, file.rules_override)
}
- if (file && file.exclusions && file.exclusions.length) {
- batchConfiguration = true
+ if (file?.exclusions?.length) {
newExclusions.set(id, file.exclusions)
}
- if (file && file.custom_rules && file.custom_rules.length) {
- batchConfiguration = true
+ if (file?.custom_rules?.length) {
newCustomRules.set(id, file.custom_rules)
}
- if (file && file.actions && file.actions.length) {
+ if (file?.actions?.length) {
newActions.set(id, file.actions)
}
- // "actions" data is managed by tracer and not by waf
- if (batchConfiguration) {
- batch.add(item)
- }
+ batch.add(item)
}
}
@@ -112,7 +100,9 @@ function updateWafFromRC ({ toUnapply, toApply, toModify }) {
newRuleset ||
newRulesOverride.modified ||
newExclusions.modified ||
- newCustomRules.modified) {
+ newCustomRules.modified ||
+ newActions.modified
+ ) {
const payload = newRuleset || {}
if (newRulesData.modified) {
@@ -127,6 +117,9 @@ function updateWafFromRC ({ toUnapply, toApply, toModify }) {
if (newCustomRules.modified) {
payload.custom_rules = concatArrays(newCustomRules)
}
+ if (newActions.modified) {
+ payload.actions = concatArrays(newActions)
+ }
try {
waf.update(payload)
@@ -146,6 +139,9 @@ function updateWafFromRC ({ toUnapply, toApply, toModify }) {
if (newCustomRules.modified) {
appliedCustomRules = newCustomRules
}
+ if (newActions.modified) {
+ appliedActions = newActions
+ }
} catch (err) {
newApplyState = ERROR
newApplyError = err.toString()
@@ -156,11 +152,6 @@ function updateWafFromRC ({ toUnapply, toApply, toModify }) {
config.apply_state = newApplyState
if (newApplyError) config.apply_error = newApplyError
}
-
- if (newActions.modified) {
- blocking.updateBlockingConfiguration(concatArrays(newActions).find(action => action.id === 'block'))
- appliedActions = newActions
- }
}
// A Map with a new prop `modified`, a bool that indicates if the Map was modified
@@ -242,7 +233,6 @@ function copyRulesData (rulesData) {
function clearAllRules () {
waf.destroy()
- blocking.updateBlockingConfiguration(undefined)
defaultRules = undefined
diff --git a/packages/dd-trace/src/appsec/sdk/user_blocking.js b/packages/dd-trace/src/appsec/sdk/user_blocking.js
index 0c385b8202d..19997d3ff9c 100644
--- a/packages/dd-trace/src/appsec/sdk/user_blocking.js
+++ b/packages/dd-trace/src/appsec/sdk/user_blocking.js
@@ -3,17 +3,14 @@
const { USER_ID } = require('../addresses')
const waf = require('../waf')
const { getRootSpan } = require('./utils')
-const { block } = require('../blocking')
+const { block, getBlockingAction } = require('../blocking')
const { storage } = require('../../../../datadog-core')
const { setUserTags } = require('./set_user')
const log = require('../../log')
function isUserBlocked (user) {
const actions = waf.run({ persistent: { [USER_ID]: user.id } })
-
- if (!actions) return false
-
- return actions.includes('block')
+ return !!getBlockingAction(actions)
}
function checkUserAndSetUser (tracer, user) {
diff --git a/packages/dd-trace/src/appsec/waf/waf_context_wrapper.js b/packages/dd-trace/src/appsec/waf/waf_context_wrapper.js
index 7c7f99fba3e..53da0d5d5df 100644
--- a/packages/dd-trace/src/appsec/waf/waf_context_wrapper.js
+++ b/packages/dd-trace/src/appsec/waf/waf_context_wrapper.js
@@ -3,6 +3,7 @@
const log = require('../../log')
const Reporter = require('../reporter')
const addresses = require('../addresses')
+const { getBlockingAction } = require('../blocking')
// TODO: remove once ephemeral addresses are implemented
const preventDuplicateAddresses = new Set([
@@ -60,7 +61,8 @@ class WAFContextWrapper {
this.addressesToSkip = newAddressesToSkip
const ruleTriggered = !!result.events?.length
- const blockTriggered = result.actions?.includes('block')
+
+ const blockTriggered = !!getBlockingAction(result.actions)
Reporter.reportMetrics({
duration: result.totalRuntime / 1e3,
diff --git a/packages/dd-trace/src/ci-visibility/exporters/test-worker/index.js b/packages/dd-trace/src/ci-visibility/exporters/test-worker/index.js
index f91bdd52090..e74869dbe82 100644
--- a/packages/dd-trace/src/ci-visibility/exporters/test-worker/index.js
+++ b/packages/dd-trace/src/ci-visibility/exporters/test-worker/index.js
@@ -4,7 +4,8 @@ const Writer = require('./writer')
const {
JEST_WORKER_COVERAGE_PAYLOAD_CODE,
JEST_WORKER_TRACE_PAYLOAD_CODE,
- CUCUMBER_WORKER_TRACE_PAYLOAD_CODE
+ CUCUMBER_WORKER_TRACE_PAYLOAD_CODE,
+ MOCHA_WORKER_TRACE_PAYLOAD_CODE
} = require('../../../plugins/util/test')
function getInterprocessTraceCode () {
@@ -14,6 +15,9 @@ function getInterprocessTraceCode () {
if (process.env.CUCUMBER_WORKER_ID) {
return CUCUMBER_WORKER_TRACE_PAYLOAD_CODE
}
+ if (process.env.MOCHA_WORKER_ID) {
+ return MOCHA_WORKER_TRACE_PAYLOAD_CODE
+ }
return null
}
diff --git a/packages/dd-trace/src/config.js b/packages/dd-trace/src/config.js
index 2f2e432dce6..1ec2462aa69 100644
--- a/packages/dd-trace/src/config.js
+++ b/packages/dd-trace/src/config.js
@@ -2,7 +2,7 @@
const fs = require('fs')
const os = require('os')
-const uuid = require('crypto-randomuuid')
+const uuid = require('crypto-randomuuid') // we need to keep the old uuid dep because of cypress
const URL = require('url').URL
const log = require('./log')
const pkg = require('./pkg')
@@ -170,6 +170,7 @@ class Config {
// Configure the logger first so it can be used to warn about other configs
this.debug = isTrue(coalesce(
process.env.DD_TRACE_DEBUG,
+ process.env.OTEL_LOG_LEVEL && process.env.OTEL_LOG_LEVEL === 'debug',
false
))
this.logger = options.logger
@@ -288,15 +289,6 @@ class Config {
)
const sampler = {
- rules: coalesce(
- options.samplingRules,
- safeJsonParse(process.env.DD_TRACE_SAMPLING_RULES),
- []
- ).map(rule => {
- return remapify(rule, {
- sample_rate: 'sampleRate'
- })
- }),
spanSamplingRules: coalesce(
options.spanSamplingRules,
safeJsonParse(maybeFile(process.env.DD_SPAN_SAMPLING_RULES_FILE)),
@@ -444,6 +436,7 @@ class Config {
this._setValue(defaults, 'appsec.enabled', undefined)
this._setValue(defaults, 'appsec.obfuscatorKeyRegex', defaultWafObfuscatorKeyRegex)
this._setValue(defaults, 'appsec.obfuscatorValueRegex', defaultWafObfuscatorValueRegex)
+ this._setValue(defaults, 'appsec.rasp.enabled', false)
this._setValue(defaults, 'appsec.rateLimit', 100)
this._setValue(defaults, 'appsec.rules', undefined)
this._setValue(defaults, 'appsec.sca.enabled', null)
@@ -485,9 +478,12 @@ class Config {
this._setValue(defaults, 'peerServiceMapping', {})
this._setValue(defaults, 'plugins', true)
this._setValue(defaults, 'port', '8126')
- this._setValue(defaults, 'profiling.enabled', false)
+ this._setValue(defaults, 'profiling.enabled', undefined)
this._setValue(defaults, 'profiling.exporters', 'agent')
this._setValue(defaults, 'profiling.sourceMap', true)
+ this._setValue(defaults, 'profiling.ssi', false)
+ this._setValue(defaults, 'profiling.heuristicsEnabled', false)
+ this._setValue(defaults, 'profiling.longLivedThreshold', undefined)
this._setValue(defaults, 'protocolVersion', '0.4')
this._setValue(defaults, 'queryStringObfuscation', qsRegex)
this._setValue(defaults, 'remoteConfig.enabled', true)
@@ -496,6 +492,7 @@ class Config {
this._setValue(defaults, 'runtimeMetrics', false)
this._setValue(defaults, 'sampleRate', undefined)
this._setValue(defaults, 'sampler.rateLimit', undefined)
+ this._setValue(defaults, 'sampler.rules', [])
this._setValue(defaults, 'scope', undefined)
this._setValue(defaults, 'service', service)
this._setValue(defaults, 'site', 'datadoghq.com')
@@ -531,6 +528,7 @@ class Config {
DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP,
DD_APPSEC_RULES,
DD_APPSEC_SCA_ENABLED,
+ DD_APPSEC_RASP_ENABLED,
DD_APPSEC_TRACE_RATE_LIMIT,
DD_APPSEC_WAF_TIMEOUT,
DD_DATA_STREAMS_ENABLED,
@@ -558,6 +556,7 @@ class Config {
DD_PROFILING_ENABLED,
DD_PROFILING_EXPORTERS,
DD_PROFILING_SOURCE_MAP,
+ DD_INTERNAL_PROFILING_LONG_LIVED_THRESHOLD,
DD_REMOTE_CONFIGURATION_ENABLED,
DD_REMOTE_CONFIG_POLL_INTERVAL_SECONDS,
DD_RUNTIME_METRICS_ENABLED,
@@ -590,6 +589,7 @@ class Config {
DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED,
DD_TRACE_REPORT_HOSTNAME,
DD_TRACE_SAMPLE_RATE,
+ DD_TRACE_SAMPLING_RULES,
DD_TRACE_SCOPE,
DD_TRACE_SPAN_ATTRIBUTE_SCHEMA,
DD_TRACE_STARTUP_LOGS,
@@ -618,6 +618,7 @@ class Config {
this._setBoolean(env, 'appsec.enabled', DD_APPSEC_ENABLED)
this._setString(env, 'appsec.obfuscatorKeyRegex', DD_APPSEC_OBFUSCATION_PARAMETER_KEY_REGEXP)
this._setString(env, 'appsec.obfuscatorValueRegex', DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP)
+ this._setBoolean(env, 'appsec.rasp.enabled', DD_APPSEC_RASP_ENABLED)
this._setValue(env, 'appsec.rateLimit', maybeInt(DD_APPSEC_TRACE_RATE_LIMIT))
this._setString(env, 'appsec.rules', DD_APPSEC_RULES)
// DD_APPSEC_SCA_ENABLED is never used locally, but only sent to the backend
@@ -663,6 +664,17 @@ class Config {
this._setBoolean(env, 'profiling.enabled', coalesce(DD_EXPERIMENTAL_PROFILING_ENABLED, DD_PROFILING_ENABLED))
this._setString(env, 'profiling.exporters', DD_PROFILING_EXPORTERS)
this._setBoolean(env, 'profiling.sourceMap', DD_PROFILING_SOURCE_MAP && !isFalse(DD_PROFILING_SOURCE_MAP))
+ if (DD_PROFILING_ENABLED === 'auto' || DD_INJECTION_ENABLED) {
+ this._setBoolean(env, 'profiling.ssi', true)
+ if (DD_PROFILING_ENABLED === 'auto' || DD_INJECTION_ENABLED.split(',').includes('profiler')) {
+ this._setBoolean(env, 'profiling.heuristicsEnabled', true)
+ }
+ if (DD_INTERNAL_PROFILING_LONG_LIVED_THRESHOLD) {
+ // This is only used in testing, so we don't have to wait the full 30s
+ this._setValue(env, 'profiling.longLivedThreshold', Number(DD_INTERNAL_PROFILING_LONG_LIVED_THRESHOLD))
+ }
+ }
+
this._setString(env, 'protocolVersion', DD_TRACE_AGENT_PROTOCOL_VERSION)
this._setString(env, 'queryStringObfuscation', DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP)
this._setBoolean(env, 'remoteConfig.enabled', coalesce(
@@ -687,6 +699,7 @@ class Config {
}
this._setUnit(env, 'sampleRate', DD_TRACE_SAMPLE_RATE || OTEL_TRACES_SAMPLER_MAPPING[OTEL_TRACES_SAMPLER])
this._setValue(env, 'sampler.rateLimit', DD_TRACE_RATE_LIMIT)
+ this._setSamplingRule(env, 'sampler.rules', safeJsonParse(DD_TRACE_SAMPLING_RULES))
this._setString(env, 'scope', DD_TRACE_SCOPE)
this._setString(env, 'service', DD_SERVICE || DD_SERVICE_NAME || tags.service || OTEL_SERVICE_NAME)
this._setString(env, 'site', DD_SITE)
@@ -707,9 +720,7 @@ class Config {
this._setBoolean(env, 'telemetry.dependencyCollection', DD_TELEMETRY_DEPENDENCY_COLLECTION_ENABLED)
this._setValue(env, 'telemetry.heartbeatInterval', maybeInt(Math.floor(DD_TELEMETRY_HEARTBEAT_INTERVAL * 1000)))
const hasTelemetryLogsUsingFeatures =
- isTrue(DD_IAST_ENABLED) ||
- isTrue(DD_PROFILING_ENABLED) ||
- (typeof DD_INJECTION_ENABLED === 'string' && DD_INJECTION_ENABLED.split(',').includes('profiling'))
+ env['iast.enabled'] || env['profiling.enabled'] || env['profiling.heuristicsEnabled']
? true
: undefined
this._setBoolean(env, 'telemetry.logCollection', coalesce(DD_TELEMETRY_LOG_COLLECTION_ENABLED,
@@ -734,6 +745,7 @@ class Config {
this._setBoolean(opts, 'appsec.enabled', options.appsec.enabled)
this._setString(opts, 'appsec.obfuscatorKeyRegex', options.appsec.obfuscatorKeyRegex)
this._setString(opts, 'appsec.obfuscatorValueRegex', options.appsec.obfuscatorValueRegex)
+ this._setBoolean(opts, 'appsec.rasp.enabled', options.appsec.rasp?.enabled)
this._setValue(opts, 'appsec.rateLimit', maybeInt(options.appsec.rateLimit))
this._setString(opts, 'appsec.rules', options.appsec.rules)
this._setValue(opts, 'appsec.wafTimeout', maybeInt(options.appsec.wafTimeout))
@@ -786,6 +798,7 @@ class Config {
this._setUnit(opts, 'sampleRate', coalesce(options.sampleRate, options.ingestion.sampleRate))
const ingestion = options.ingestion || {}
this._setValue(opts, 'sampler.rateLimit', coalesce(options.rateLimit, ingestion.rateLimit))
+ this._setSamplingRule(opts, 'sampler.rules', options.samplingRules)
this._setString(opts, 'service', options.service || tags.service)
this._setString(opts, 'site', options.site)
if (options.spanAttributeSchema) {
@@ -933,6 +946,17 @@ class Config {
this._setArray(opts, 'headerTags', headerTags)
this._setTags(opts, 'tags', tags)
this._setBoolean(opts, 'tracing', options.tracing_enabled)
+ // ignore tags for now since the RC sampling rule tags format is not supported
+ this._setSamplingRule(opts, 'sampler.rules', this._ignoreTags(options.trace_sample_rules))
+ }
+
+ _ignoreTags (samplingRules) {
+ if (samplingRules) {
+ for (const rule of samplingRules) {
+ delete rule.tags
+ }
+ }
+ return samplingRules
}
_setBoolean (obj, name, value) {
@@ -959,12 +983,12 @@ class Config {
}
_setArray (obj, name, value) {
- if (value === null || value === undefined) {
+ if (value == null) {
return this._setValue(obj, name, null)
}
if (typeof value === 'string') {
- value = value && value.split(',')
+ value = value.split(',')
}
if (Array.isArray(value)) {
@@ -972,6 +996,25 @@ class Config {
}
}
+ _setSamplingRule (obj, name, value) {
+ if (value == null) {
+ return this._setValue(obj, name, null)
+ }
+
+ if (typeof value === 'string') {
+ value = value.split(',')
+ }
+
+ if (Array.isArray(value)) {
+ value = value.map(rule => {
+ return remapify(rule, {
+ sample_rate: 'sampleRate'
+ })
+ })
+ this._setValue(obj, name, value)
+ }
+ }
+
_setString (obj, name, value) {
obj[name] = value ? String(value) : undefined // unset for empty strings
}
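
For reference, the input shape _setSamplingRule accepts from the environment, assuming remapify renames keys in place as its use here suggests; the rule itself is illustrative:

    // DD_TRACE_SAMPLING_RULES is a JSON array; snake_case sample_rate is
    // remapped so the sampler sees the same camelCase shape as the
    // programmatic samplingRules option.
    process.env.DD_TRACE_SAMPLING_RULES = JSON.stringify([
      { service: 'web', resource: 'GET /health', sample_rate: 0.1 }
    ])
    // after safeJsonParse + remapify({ sample_rate: 'sampleRate' }):
    // [{ service: 'web', resource: 'GET /health', sampleRate: 0.1 }]
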
diff --git a/packages/dd-trace/src/encode/0.4.js b/packages/dd-trace/src/encode/0.4.js
index 11cf8c4b6c7..02d96cb8a26 100644
--- a/packages/dd-trace/src/encode/0.4.js
+++ b/packages/dd-trace/src/encode/0.4.js
@@ -120,7 +120,7 @@ class AgentEncoder {
this._encodeMap(bytes, span.metrics)
if (span.meta_struct) {
this._encodeString(bytes, 'meta_struct')
- this._encodeObject(bytes, span.meta_struct)
+ this._encodeMetaStruct(bytes, span.meta_struct)
}
}
}
@@ -271,12 +271,48 @@ class AgentEncoder {
}
}
+ _encodeMetaStruct (bytes, value) {
+ const keys = Array.isArray(value) ? [] : Object.keys(value)
+ const validKeys = keys.filter(key => {
+ const v = value[key]
+ return typeof v === 'string' ||
+ typeof v === 'number' ||
+ (v !== null && typeof v === 'object')
+ })
+
+ this._encodeMapPrefix(bytes, validKeys.length)
+
+ for (const key of validKeys) {
+ const v = value[key]
+ this._encodeString(bytes, key)
+ this._encodeObjectAsByteArray(bytes, v)
+ }
+ }
+
+ _encodeObjectAsByteArray (bytes, value) {
+ const prefixLength = 5
+ const offset = bytes.length
+
+ bytes.reserve(prefixLength)
+ bytes.length += prefixLength
+
+ this._encodeObject(bytes, value)
+
+ // the length prefix can only be filled in after encoding the object, once the real length is known
+ const length = bytes.length - offset - prefixLength
+ bytes.buffer[offset] = 0xc6
+ bytes.buffer[offset + 1] = length >> 24
+ bytes.buffer[offset + 2] = length >> 16
+ bytes.buffer[offset + 3] = length >> 8
+ bytes.buffer[offset + 4] = length
+ }
+
_encodeObject (bytes, value, circularReferencesDetector = new Set()) {
circularReferencesDetector.add(value)
if (Array.isArray(value)) {
- return this._encodeObjectAsArray(bytes, value, circularReferencesDetector)
+ this._encodeObjectAsArray(bytes, value, circularReferencesDetector)
} else if (value !== null && typeof value === 'object') {
- return this._encodeObjectAsMap(bytes, value, circularReferencesDetector)
+ this._encodeObjectAsMap(bytes, value, circularReferencesDetector)
} else if (typeof value === 'string' || typeof value === 'number') {
this._encodeValue(bytes, value)
}
@@ -284,16 +320,19 @@ class AgentEncoder {
_encodeObjectAsMap (bytes, value, circularReferencesDetector) {
const keys = Object.keys(value)
- const validKeys = keys.filter(key =>
- typeof value[key] === 'string' ||
- typeof value[key] === 'number' ||
- (value[key] !== null && typeof value[key] === 'object' && !circularReferencesDetector.has(value[key])))
+ const validKeys = keys.filter(key => {
+ const v = value[key]
+ return typeof v === 'string' ||
+ typeof v === 'number' ||
+ (v !== null && typeof v === 'object' && !circularReferencesDetector.has(v))
+ })
this._encodeMapPrefix(bytes, validKeys.length)
for (const key of validKeys) {
+ const v = value[key]
this._encodeString(bytes, key)
- this._encodeObject(bytes, value[key], circularReferencesDetector)
+ this._encodeObject(bytes, v, circularReferencesDetector)
}
}
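
The five reserved bytes written by _encodeObjectAsByteArray form a MessagePack bin 32 header: the 0xc6 marker followed by the payload length as a big-endian uint32. A standalone illustration of the same byte layout:

    function binPrefix (length) {
      return Buffer.from([
        0xc6, // msgpack bin 32 marker
        (length >> 24) & 0xff,
        (length >> 16) & 0xff,
        (length >> 8) & 0xff,
        length & 0xff
      ])
    }

    console.log(binPrefix(260)) // <Buffer c6 00 00 01 04>
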
diff --git a/packages/dd-trace/src/exporter.js b/packages/dd-trace/src/exporter.js
index 01bb96ac380..02d50c3b57e 100644
--- a/packages/dd-trace/src/exporter.js
+++ b/packages/dd-trace/src/exporter.js
@@ -19,6 +19,7 @@ module.exports = name => {
return require('./ci-visibility/exporters/agent-proxy')
case exporters.JEST_WORKER:
case exporters.CUCUMBER_WORKER:
+ case exporters.MOCHA_WORKER:
return require('./ci-visibility/exporters/test-worker')
default:
return inAWSLambda && !usingLambdaExtension ? require('./exporters/log') : require('./exporters/agent')
diff --git a/packages/dd-trace/src/flare/file.js b/packages/dd-trace/src/flare/file.js
new file mode 100644
index 00000000000..00388e14c5b
--- /dev/null
+++ b/packages/dd-trace/src/flare/file.js
@@ -0,0 +1,44 @@
+'use strict'
+
+const { Writable } = require('stream')
+
+const INITIAL_SIZE = 64 * 1024
+
+class FlareFile extends Writable {
+ constructor () {
+ super()
+
+ this.length = 0
+
+ this._buffer = Buffer.alloc(INITIAL_SIZE)
+ }
+
+ get data () {
+ return this._buffer.subarray(0, this.length)
+ }
+
+ _write (chunk, encoding, callback) {
+ const length = Buffer.byteLength(chunk)
+
+ this._reserve(length)
+
+ if (Buffer.isBuffer(chunk)) {
+ this.length += chunk.copy(this._buffer, this.length)
+ } else {
+ this.length += this._buffer.write(chunk, this.length, encoding)
+ }
+
+ callback()
+ }
+
+ _reserve (length) {
+ while (this.length + length > this._buffer.length) {
+ // double the backing buffer until the incoming chunk fits
+ const buffer = Buffer.alloc(this._buffer.length * 2)
+
+ this._buffer.copy(buffer)
+ this._buffer = buffer
+ }
+ }
+}
+
+module.exports = FlareFile
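
Usage sketch for the new in-memory sink; it is a plain Writable whose contents can later be attached to a flare upload (module path as introduced in this diff):

    const FlareFile = require('./packages/dd-trace/src/flare/file')

    const file = new FlareFile()
    file.write('first line\n')
    file.write(Buffer.from('second line\n'))
    console.log(file.data.toString()) // 'first line\nsecond line\n'
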
diff --git a/packages/dd-trace/src/flare/index.js b/packages/dd-trace/src/flare/index.js
new file mode 100644
index 00000000000..70ec4ccd75e
--- /dev/null
+++ b/packages/dd-trace/src/flare/index.js
@@ -0,0 +1,98 @@
+'use strict'
+
+const log = require('../log')
+const startupLog = require('../startup-log')
+const FlareFile = require('./file')
+const { LogChannel } = require('../log/channels')
+const request = require('../exporters/common/request')
+const FormData = require('../exporters/common/form-data')
+
+const MAX_LOG_SIZE = 12 * 1024 * 1024 // 12MB soft limit
+const TIMEOUT = 20 * 1000 * 60 // 20 minutes
+
+let logChannel = null
+let tracerLogs = null
+let timer
+let tracerConfig = null
+
+const logger = {
+ debug: (msg) => recordLog(msg),
+ info: (msg) => recordLog(msg),
+ warn: (msg) => recordLog(msg),
+ error: (err) => recordLog(err.stack)
+}
+
+const flare = {
+ enable (tracerConfig_) {
+ tracerConfig = tracerConfig_
+ },
+
+ disable () {
+ tracerConfig = null
+
+ flare.cleanup()
+ },
+
+ prepare (logLevel) {
+ if (!tracerConfig) return
+
+ logChannel?.unsubscribe(logger)
+ logChannel = new LogChannel(logLevel)
+ logChannel.subscribe(logger)
+ tracerLogs = tracerLogs || new FlareFile()
+ timer = timer || setTimeout(flare.cleanup, TIMEOUT)
+ },
+
+ send (task) {
+ if (!tracerConfig) return
+
+ const tracerInfo = new FlareFile()
+
+ tracerInfo.write(JSON.stringify(startupLog.tracerInfo(), null, 2))
+
+ flare._sendFile(task, tracerInfo, 'tracer_info.txt')
+ flare._sendFile(task, tracerLogs, 'tracer_logs.txt')
+
+ flare.cleanup()
+ },
+
+ cleanup () {
+ logChannel?.unsubscribe(logger)
+ timer = clearTimeout(timer)
+ logChannel = null
+ tracerLogs = null
+ },
+
+ _sendFile (task, file, filename) {
+ if (!file) return
+
+ const form = new FormData()
+
+ form.append('case_id', task.case_id)
+ form.append('hostname', task.hostname)
+ form.append('email', task.user_handle)
+ form.append('source', 'tracer_nodejs')
+ form.append('flare_file', file.data, { filename })
+
+ request(form, {
+ url: tracerConfig.url,
+ hostname: tracerConfig.hostname,
+ port: tracerConfig.port,
+ method: 'POST',
+ path: '/tracer_flare/v1',
+ headers: form.getHeaders()
+ }, (err) => {
+ if (err) {
+ log.error(err)
+ }
+ })
+ }
+}
+
+function recordLog (msg) {
+ if (tracerLogs.length > MAX_LOG_SIZE) return
+
+ tracerLogs.write(`${msg}\n`) // TODO: gzip
+}
+
+module.exports = flare
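
The module is driven by two remote config products, wired up in proxy.js later in this diff. A hedged example of the payloads involved; only the fields actually read by this code are shown, and the values are illustrative:

    // AGENT_CONFIG: a config named flare-log-level.<level> arms log collection
    const agentConfig = {
      name: 'flare-log-level.debug',
      config: { log_level: 'debug' } // -> flare.prepare('debug')
    }

    // AGENT_TASK: a tracer_flare task triggers the upload
    const agentTask = {
      task_type: 'tracer_flare',
      args: { case_id: '12345', hostname: 'web-1', user_handle: 'user@example.com' }
      // -> flare.send(agentTask.args) posts tracer_info.txt and tracer_logs.txt
      //    to /tracer_flare/v1
    }
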
diff --git a/packages/dd-trace/src/log/channels.js b/packages/dd-trace/src/log/channels.js
index 0bf84871b34..545fef4195a 100644
--- a/packages/dd-trace/src/log/channels.js
+++ b/packages/dd-trace/src/log/channels.js
@@ -3,44 +3,69 @@
const { channel } = require('dc-polyfill')
const Level = {
- Debug: 'debug',
- Info: 'info',
- Warn: 'warn',
- Error: 'error'
+ trace: 20,
+ debug: 20,
+ info: 30,
+ warn: 40,
+ error: 50,
+ critical: 50,
+ off: 100
}
-const defaultLevel = Level.Debug
+const debugChannel = channel('datadog:log:debug')
+const infoChannel = channel('datadog:log:info')
+const warnChannel = channel('datadog:log:warn')
+const errorChannel = channel('datadog:log:error')
-// based on: https://github.com/trentm/node-bunyan#levels
-const logChannels = {
- [Level.Debug]: createLogChannel(Level.Debug, 20),
- [Level.Info]: createLogChannel(Level.Info, 30),
- [Level.Warn]: createLogChannel(Level.Warn, 40),
- [Level.Error]: createLogChannel(Level.Error, 50)
-}
+const defaultLevel = Level.debug
-function createLogChannel (name, logLevel) {
- const logChannel = channel(`datadog:log:${name}`)
- logChannel.logLevel = logLevel
- return logChannel
+function getChannelLogLevel (level) {
+ return level && typeof level === 'string'
+ ? Level[level.toLowerCase().trim()] || defaultLevel
+ : defaultLevel
}
-function getChannelLogLevel (level) {
- let logChannel
- if (level && typeof level === 'string') {
- logChannel = logChannels[level.toLowerCase().trim()] || logChannels[defaultLevel]
- } else {
- logChannel = logChannels[defaultLevel]
+class LogChannel {
+ constructor (level) {
+ this._level = getChannelLogLevel(level)
+ }
+
+ subscribe (logger) {
+ if (Level.debug >= this._level) {
+ debugChannel.subscribe(logger.debug)
+ }
+ if (Level.info >= this._level) {
+ infoChannel.subscribe(logger.info)
+ }
+ if (Level.warn >= this._level) {
+ warnChannel.subscribe(logger.warn)
+ }
+ if (Level.error >= this._level) {
+ errorChannel.subscribe(logger.error)
+ }
+ }
+
+ unsubscribe (logger) {
+ if (debugChannel.hasSubscribers) {
+ debugChannel.unsubscribe(logger.debug)
+ }
+ if (infoChannel.hasSubscribers) {
+ infoChannel.unsubscribe(logger.info)
+ }
+ if (warnChannel.hasSubscribers) {
+ warnChannel.unsubscribe(logger.warn)
+ }
+ if (errorChannel.hasSubscribers) {
+ errorChannel.unsubscribe(logger.error)
+ }
}
- return logChannel.logLevel
}
module.exports = {
- Level,
- getChannelLogLevel,
+ LogChannel,
- debugChannel: logChannels[Level.Debug],
- infoChannel: logChannels[Level.Info],
- warnChannel: logChannels[Level.Warn],
- errorChannel: logChannels[Level.Error]
+ debugChannel,
+ infoChannel,
+ warnChannel,
+ errorChannel
}
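
The level filtering now happens at subscription time: a LogChannel built for a given level only wires the diagnostics channels at or above it. A short sketch (module path assumed):

    const { LogChannel } = require('./packages/dd-trace/src/log/channels')

    const logger = {
      debug: msg => console.log('debug:', msg),
      info: msg => console.log('info:', msg),
      warn: msg => console.log('warn:', msg),
      error: err => console.log('error:', err)
    }

    new LogChannel('warn').subscribe(logger)
    // Only the warn and error channels gain subscribers, so publishes on
    // 'datadog:log:debug' and 'datadog:log:info' stay no-ops for this logger.
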
diff --git a/packages/dd-trace/src/log/writer.js b/packages/dd-trace/src/log/writer.js
index 798d6269f14..bc4a5b20621 100644
--- a/packages/dd-trace/src/log/writer.js
+++ b/packages/dd-trace/src/log/writer.js
@@ -1,8 +1,7 @@
'use strict'
const { storage } = require('../../../datadog-core')
-const { getChannelLogLevel, debugChannel, infoChannel, warnChannel, errorChannel } = require('./channels')
-
+const { LogChannel } = require('./channels')
const defaultLogger = {
debug: msg => console.debug(msg), /* eslint-disable-line no-console */
info: msg => console.info(msg), /* eslint-disable-line no-console */
@@ -12,7 +11,7 @@ const defaultLogger = {
let enabled = false
let logger = defaultLogger
-let logLevel = getChannelLogLevel()
+let logChannel = new LogChannel()
function withNoop (fn) {
const store = storage.getStore()
@@ -23,45 +22,21 @@ function withNoop (fn) {
}
function unsubscribeAll () {
- if (debugChannel.hasSubscribers) {
- debugChannel.unsubscribe(onDebug)
- }
- if (infoChannel.hasSubscribers) {
- infoChannel.unsubscribe(onInfo)
- }
- if (warnChannel.hasSubscribers) {
- warnChannel.unsubscribe(onWarn)
- }
- if (errorChannel.hasSubscribers) {
- errorChannel.unsubscribe(onError)
- }
+ logChannel.unsubscribe({ debug, info, warn, error })
}
-function toggleSubscription (enable) {
+function toggleSubscription (enable, level) {
unsubscribeAll()
if (enable) {
- if (debugChannel.logLevel >= logLevel) {
- debugChannel.subscribe(onDebug)
- }
- if (infoChannel.logLevel >= logLevel) {
- infoChannel.subscribe(onInfo)
- }
- if (warnChannel.logLevel >= logLevel) {
- warnChannel.subscribe(onWarn)
- }
- if (errorChannel.logLevel >= logLevel) {
- errorChannel.subscribe(onError)
- }
+ logChannel = new LogChannel(level)
+ logChannel.subscribe({ debug, info, warn, error })
}
}
function toggle (enable, level) {
- if (level !== undefined) {
- logLevel = getChannelLogLevel(level)
- }
enabled = enable
- toggleSubscription(enabled)
+ toggleSubscription(enabled, level)
}
function use (newLogger) {
@@ -73,26 +48,9 @@ function use (newLogger) {
function reset () {
logger = defaultLogger
enabled = false
- logLevel = getChannelLogLevel()
toggleSubscription(false)
}
-function onError (err) {
- if (enabled) error(err)
-}
-
-function onWarn (message) {
- if (enabled) warn(message)
-}
-
-function onInfo (message) {
- if (enabled) info(message)
-}
-
-function onDebug (message) {
- if (enabled) debug(message)
-}
-
function error (err) {
if (typeof err !== 'object' || !err) {
err = String(err)
diff --git a/packages/dd-trace/src/opentracing/propagation/text_map.js b/packages/dd-trace/src/opentracing/propagation/text_map.js
index 5146a48806c..0b74674f21e 100644
--- a/packages/dd-trace/src/opentracing/propagation/text_map.js
+++ b/packages/dd-trace/src/opentracing/propagation/text_map.js
@@ -5,6 +5,7 @@ const id = require('../../id')
const DatadogSpanContext = require('../span_context')
const log = require('../../log')
const TraceState = require('./tracestate')
+const tags = require('../../../../../ext/tags')
const { AUTO_KEEP, AUTO_REJECT, USER_KEEP } = require('../../../../../ext/priority')
@@ -39,6 +40,7 @@ const tracestateTagKeyFilter = /[^\x21-\x2b\x2d-\x3c\x3e-\x7e]/g
// Tag values in tracestate replace ',', '~' and ';' with '_'
const tracestateTagValueFilter = /[^\x20-\x2b\x2d-\x3a\x3c-\x7d]/g
const invalidSegment = /^0+$/
+const zeroTraceId = '0000000000000000'
class TextMapPropagator {
constructor (config) {
@@ -175,9 +177,9 @@ class TextMapPropagator {
// SpanContext was created by a ddtrace span.
// Last datadog span id should be set to the current span.
state.set('p', spanContext._spanId)
- } else if (spanContext._trace.tags['_dd.parent_id']) {
+ } else if (spanContext._trace.tags[tags.DD_PARENT_ID]) {
// Propagate the last Datadog span id set on the remote span.
- state.set('p', spanContext._trace.tags['_dd.parent_id'])
+ state.set('p', spanContext._trace.tags[tags.DD_PARENT_ID])
}
state.set('s', priority)
if (mechanism) {
@@ -214,9 +216,56 @@ class TextMapPropagator {
return this._config.tracePropagationStyle[mode].includes(name)
}
+ _hasTraceIdConflict (w3cSpanContext, firstSpanContext) {
+ return w3cSpanContext !== null &&
+ firstSpanContext.toTraceId(true) === w3cSpanContext.toTraceId(true) &&
+ firstSpanContext.toSpanId() !== w3cSpanContext.toSpanId()
+ }
+
+ _hasParentIdInTags (spanContext) {
+ return tags.DD_PARENT_ID in spanContext._trace.tags &&
+ spanContext._trace.tags[tags.DD_PARENT_ID] !== zeroTraceId
+ }
+
+ _updateParentIdFromDdHeaders (carrier, firstSpanContext) {
+ const ddCtx = this._extractDatadogContext(carrier)
+ if (ddCtx !== null) {
+ firstSpanContext._trace.tags[tags.DD_PARENT_ID] = ddCtx._spanId.toString().padStart(16, '0')
+ }
+ }
+
+ _resolveTraceContextConflicts (w3cSpanContext, firstSpanContext, carrier) {
+ if (!this._hasTraceIdConflict(w3cSpanContext, firstSpanContext)) {
+ return firstSpanContext
+ }
+ if (this._hasParentIdInTags(w3cSpanContext)) {
+ // tracecontext headers contain a p value; ensure it is sent to the backend
+ firstSpanContext._trace.tags[tags.DD_PARENT_ID] = w3cSpanContext._trace.tags[tags.DD_PARENT_ID]
+ } else {
+ // if the p value is not present in tracestate, use the parent id from the Datadog headers
+ this._updateParentIdFromDdHeaders(carrier, firstSpanContext)
+ }
+ // the span_id in tracecontext takes precedence over the first extracted propagation style
+ firstSpanContext._spanId = w3cSpanContext._spanId
+ return firstSpanContext
+ }
+
_extractSpanContext (carrier) {
+ let spanContext = null
for (const extractor of this._config.tracePropagationStyle.extract) {
- let spanContext = null
+ // ensure tracecontext headers take precedence over other extracted headers
+ if (spanContext !== null) {
+ if (this._config.tracePropagationExtractFirst) {
+ return spanContext
+ }
+ if (extractor !== 'tracecontext') {
+ continue
+ }
+ spanContext = this._resolveTraceContextConflicts(
+ this._extractTraceparentContext(carrier), spanContext, carrier)
+ break
+ }
+
switch (extractor) {
case 'datadog':
spanContext = this._extractDatadogContext(carrier)
@@ -238,13 +287,9 @@ class TextMapPropagator {
default:
log.warn(`Unknown propagation style: ${extractor}`)
}
-
- if (spanContext !== null) {
- return spanContext
- }
}
- return this._extractSqsdContext(carrier)
+ return spanContext || this._extractSqsdContext(carrier)
}
_extractDatadogContext (carrier) {
@@ -354,7 +399,7 @@ class TextMapPropagator {
for (const [key, value] of state.entries()) {
switch (key) {
case 'p': {
- spanContext._trace.tags['_dd.parent_id'] = value
+ spanContext._trace.tags[tags.DD_PARENT_ID] = value
break
}
case 's': {
@@ -387,8 +432,8 @@ class TextMapPropagator {
}
})
- if (!spanContext._trace.tags['_dd.parent_id']) {
- spanContext._trace.tags['_dd.parent_id'] = '0000000000000000'
+ if (!spanContext._trace.tags[tags.DD_PARENT_ID]) {
+ spanContext._trace.tags[tags.DD_PARENT_ID] = zeroTraceId
}
this._extractBaggageItems(carrier, spanContext)
@@ -531,7 +576,7 @@ class TextMapPropagator {
const tid = traceId.substring(0, 16)
- if (tid === '0000000000000000') return
+ if (tid === zeroTraceId) return
spanContext._trace.tags['_dd.p.tid'] = tid
}
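
A worked example of the new precedence rules, with illustrative values. Suppose the extraction order is ['datadog', 'tracecontext'], both header sets are present with the same trace id, the span ids differ, and tracestate carries no p value:

    // _resolveTraceContextConflicts keeps the first (Datadog) context, but:
    //   - _spanId is replaced by the span id from traceparent
    //   - _dd.parent_id records the Datadog parent id so it still reaches the
    //     backend; per _updateParentIdFromDdHeaders it is zero-padded to 16
    //     digits, e.g. for x-datadog-parent-id: 22 (assuming the id type's
    //     default hex toString):
    console.log((22).toString(16).padStart(16, '0')) // '0000000000000016'
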
diff --git a/packages/dd-trace/src/plugins/index.js b/packages/dd-trace/src/plugins/index.js
index 82217ada440..d7193917b05 100644
--- a/packages/dd-trace/src/plugins/index.js
+++ b/packages/dd-trace/src/plugins/index.js
@@ -54,6 +54,7 @@ module.exports = {
get 'microgateway-core' () { return require('../../../datadog-plugin-microgateway-core/src') },
get mocha () { return require('../../../datadog-plugin-mocha/src') },
get 'mocha-each' () { return require('../../../datadog-plugin-mocha/src') },
+ get workerpool () { return require('../../../datadog-plugin-mocha/src') },
get moleculer () { return require('../../../datadog-plugin-moleculer/src') },
get mongodb () { return require('../../../datadog-plugin-mongodb-core/src') },
get 'mongodb-core' () { return require('../../../datadog-plugin-mongodb-core/src') },
diff --git a/packages/dd-trace/src/plugins/util/ip_blocklist.js b/packages/dd-trace/src/plugins/util/ip_blocklist.js
deleted file mode 100644
index f346a1571b8..00000000000
--- a/packages/dd-trace/src/plugins/util/ip_blocklist.js
+++ /dev/null
@@ -1,51 +0,0 @@
-'use strict'
-
-const semver = require('semver')
-
-if (semver.satisfies(process.version, '>=14.18.0')) {
- const net = require('net')
-
- module.exports = net.BlockList
-} else {
- const ipaddr = require('ipaddr.js')
-
- module.exports = class BlockList {
- constructor () {
- this.v4Ranges = []
- this.v6Ranges = []
- }
-
- addSubnet (net, prefix, type) {
- this[type === 'ipv4' ? 'v4Ranges' : 'v6Ranges'].push(ipaddr.parseCIDR(`${net}/${prefix}`))
- }
-
- check (address, type) {
- try {
- let ip = ipaddr.parse(address)
-
- type = ip.kind()
-
- if (type === 'ipv6') {
- for (const range of this.v6Ranges) {
- if (ip.match(range)) return true
- }
-
- if (ip.isIPv4MappedAddress()) {
- ip = ip.toIPv4Address()
- type = ip.kind()
- }
- }
-
- if (type === 'ipv4') {
- for (const range of this.v4Ranges) {
- if (ip.match(range)) return true
- }
- }
-
- return false
- } catch {
- return false
- }
- }
- }
-}
diff --git a/packages/dd-trace/src/plugins/util/ip_extractor.js b/packages/dd-trace/src/plugins/util/ip_extractor.js
index 14d87ec64c0..969b02746b5 100644
--- a/packages/dd-trace/src/plugins/util/ip_extractor.js
+++ b/packages/dd-trace/src/plugins/util/ip_extractor.js
@@ -1,6 +1,6 @@
'use strict'
-const BlockList = require('./ip_blocklist')
+const { BlockList } = require('net')
const net = require('net')
const ipHeaderList = [
diff --git a/packages/dd-trace/src/plugins/util/test.js b/packages/dd-trace/src/plugins/util/test.js
index 23ce067670a..d1d1861ea5d 100644
--- a/packages/dd-trace/src/plugins/util/test.js
+++ b/packages/dd-trace/src/plugins/util/test.js
@@ -62,6 +62,7 @@ const JEST_TEST_RUNNER = 'test.jest.test_runner'
const JEST_DISPLAY_NAME = 'test.jest.display_name'
const CUCUMBER_IS_PARALLEL = 'test.cucumber.is_parallel'
+const MOCHA_IS_PARALLEL = 'test.mocha.is_parallel'
const TEST_ITR_TESTS_SKIPPED = '_dd.ci.itr.tests_skipped'
const TEST_ITR_SKIPPING_ENABLED = 'test.itr.tests_skipping.enabled'
@@ -87,6 +88,9 @@ const JEST_WORKER_COVERAGE_PAYLOAD_CODE = 61
// cucumber worker variables
const CUCUMBER_WORKER_TRACE_PAYLOAD_CODE = 70
+// mocha worker variables
+const MOCHA_WORKER_TRACE_PAYLOAD_CODE = 80
+
// Early flake detection util strings
const EFD_STRING = "Retried by Datadog's Early Flake Detection"
const EFD_TEST_NAME_REGEX = new RegExp(EFD_STRING + ' \\(#\\d+\\): ', 'g')
@@ -98,6 +102,7 @@ module.exports = {
JEST_TEST_RUNNER,
JEST_DISPLAY_NAME,
CUCUMBER_IS_PARALLEL,
+ MOCHA_IS_PARALLEL,
TEST_TYPE,
TEST_NAME,
TEST_SUITE,
@@ -111,6 +116,7 @@ module.exports = {
JEST_WORKER_TRACE_PAYLOAD_CODE,
JEST_WORKER_COVERAGE_PAYLOAD_CODE,
CUCUMBER_WORKER_TRACE_PAYLOAD_CODE,
+ MOCHA_WORKER_TRACE_PAYLOAD_CODE,
TEST_SOURCE_START,
TEST_SKIPPED_BY_ITR,
TEST_CONFIGURATION_BROWSER_NAME,
diff --git a/packages/dd-trace/src/profiler.js b/packages/dd-trace/src/profiler.js
index 349f0438d7c..ce08a3d0f6f 100644
--- a/packages/dd-trace/src/profiler.js
+++ b/packages/dd-trace/src/profiler.js
@@ -9,7 +9,7 @@ process.once('beforeExit', () => { profiler.stop() })
module.exports = {
start: config => {
const { service, version, env, url, hostname, port, tags, repositoryUrl, commitSHA } = config
- const { enabled, sourceMap, exporters } = config.profiling
+ const { enabled, sourceMap, exporters, heuristicsEnabled } = config.profiling
const logger = {
debug: (message) => log.debug(message),
info: (message) => log.info(message),
@@ -19,6 +19,7 @@ module.exports = {
return profiler.start({
enabled,
+ heuristicsEnabled,
service,
version,
env,
diff --git a/packages/dd-trace/src/profiling/config.js b/packages/dd-trace/src/profiling/config.js
index 615a728c968..18b85f7c447 100644
--- a/packages/dd-trace/src/profiling/config.js
+++ b/packages/dd-trace/src/profiling/config.js
@@ -65,6 +65,7 @@ class Config {
DD_PROFILING_PPROF_PREFIX, '')
this.enabled = enabled
+ this.heuristicsEnabled = options.heuristicsEnabled
this.service = service
this.env = env
this.host = host
diff --git a/packages/dd-trace/src/profiling/profiler.js b/packages/dd-trace/src/profiling/profiler.js
index 2c0ebcd5584..17422c5f993 100644
--- a/packages/dd-trace/src/profiling/profiler.js
+++ b/packages/dd-trace/src/profiling/profiler.js
@@ -55,7 +55,7 @@ class Profiler extends EventEmitter {
if (this._enabled) return true
const config = this._config = new Config(options)
- if (!config.enabled) return false
+ if (!config.enabled && !config.heuristicsEnabled) return false
this._logger = config.logger
this._enabled = true
diff --git a/packages/dd-trace/src/profiling/ssi-telemetry.js b/packages/dd-trace/src/profiling/ssi-heuristics.js
similarity index 60%
rename from packages/dd-trace/src/profiling/ssi-telemetry.js
rename to packages/dd-trace/src/profiling/ssi-heuristics.js
index a67cba4793a..f4e10ea4628 100644
--- a/packages/dd-trace/src/profiling/ssi-telemetry.js
+++ b/packages/dd-trace/src/profiling/ssi-heuristics.js
@@ -2,28 +2,25 @@
const telemetryMetrics = require('../telemetry/metrics')
const profilersNamespace = telemetryMetrics.manager.namespace('profilers')
-const performance = require('perf_hooks').performance
const dc = require('dc-polyfill')
-const { isTrue, isFalse } = require('../util')
-// If the process lived for less than 30 seconds, it's considered short-lived
-const DEFAULT_SHORT_LIVED_THRESHOLD = 30000
+// If the process lives for at least 30 seconds, it's considered long-lived
+const DEFAULT_LONG_LIVED_THRESHOLD = 30000
const EnablementChoice = {
MANUALLY_ENABLED: Symbol('SSITelemetry.EnablementChoice.MANUALLY_ENABLED'),
SSI_ENABLED: Symbol('SSITelemetry.EnablementChoice.SSI_ENABLED'),
SSI_NOT_ENABLED: Symbol('SSITelemetry.EnablementChoice.SSI_NOT_ENABLED'),
- DISABLED: Symbol('SSITelemetry.EnablementChoice.MANUALLY_DISABLED')
+ DISABLED: Symbol('SSITelemetry.EnablementChoice.DISABLED')
}
Object.freeze(EnablementChoice)
-function getEnablementChoiceFromEnv () {
- const { DD_PROFILING_ENABLED, DD_INJECTION_ENABLED } = process.env
- if (DD_INJECTION_ENABLED === undefined || isFalse(DD_PROFILING_ENABLED)) {
+function getEnablementChoiceFromConfig (config) {
+ if (config.ssi === false || config.enabled === false) {
return EnablementChoice.DISABLED
- } else if (DD_INJECTION_ENABLED.split(',').includes('profiling')) {
+ } else if (config.heuristicsEnabled === true) {
return EnablementChoice.SSI_ENABLED
- } else if (isTrue(DD_PROFILING_ENABLED)) {
+ } else if (config.enabled === true) {
return EnablementChoice.MANUALLY_ENABLED
} else {
return EnablementChoice.SSI_NOT_ENABLED
@@ -38,39 +35,38 @@ function enablementChoiceToTagValue (enablementChoice) {
return 'ssi_enabled'
case EnablementChoice.SSI_NOT_ENABLED:
return 'not_enabled'
- case EnablementChoice.MANUALLY_DISABLED:
+ case EnablementChoice.DISABLED:
// Can't emit this one as a tag
throw new Error('Invalid enablement choice')
}
}
/**
- * This class emits telemetry metrics about the profiler behavior under SSI. It will only emit metrics
- * when the application closes, and will emit the following metrics:
+ * This class embodies the SSI profiler-triggering heuristics and also emits telemetry metrics about
+ * the profiler behavior under SSI. It emits the following metrics:
* - `number_of_profiles`: The number of profiles that were submitted
- * - `number_of_runtime_id`: The number of runtime IDs in the app (always 1 for Node.js)
- * It will also add tags describing the state of heuristics triggers, the enablement choice, and whether
- * actual profiles were sent (as opposed to mock profiles). There is a mock profiler that is activated
- * when the profiler is not enabled, and it will emit mock profile submission events at the same cadence
- * the profiler would, providing insight into how many profiles would've been emitted if SSI enabled
- * profiling. Note that telemetry is per tracer instance, and each worker thread will have its own instance.
+ * - `number_of_runtime_id`: The number of runtime IDs in the app (always 1 for Node.js; emitted
+ * once, at the point where the tags won't change for the remainder of the app's lifetime).
+ * It will also add tags describing the state of heuristics triggers, the enablement choice, and
+ * whether actual profiles were sent (as opposed to mock profiles). There is a mock profiler that is
+ * activated when the profiler is not enabled, and it will emit mock profile submission events at
+ * the same cadence the profiler would, providing insight into how many profiles would've been
+ * emitted if SSI enabled profiling. Note that heuristics (and thus telemetry) are per tracer
+ * instance, and each worker thread will have its own instance.
*/
-class SSITelemetry {
- constructor ({
- enablementChoice = getEnablementChoiceFromEnv(),
- shortLivedThreshold = DEFAULT_SHORT_LIVED_THRESHOLD
- } = {}) {
- if (!Object.values(EnablementChoice).includes(enablementChoice)) {
- throw new Error('Invalid enablement choice')
- }
- if (typeof shortLivedThreshold !== 'number' || shortLivedThreshold <= 0) {
- throw new Error('Short-lived threshold must be a positive number')
+class SSIHeuristics {
+ constructor (config) {
+ this.enablementChoice = getEnablementChoiceFromConfig(config)
+
+ const longLivedThreshold = config.longLivedThreshold || DEFAULT_LONG_LIVED_THRESHOLD
+ if (typeof longLivedThreshold !== 'number' || longLivedThreshold <= 0) {
+ throw new Error('Long-lived threshold must be a positive number')
}
- this.enablementChoice = enablementChoice
- this.shortLivedThreshold = shortLivedThreshold
+ this.longLivedThreshold = longLivedThreshold
this.hasSentProfiles = false
this.noSpan = true
+ this.shortLived = true
}
enabled () {
@@ -83,7 +79,10 @@ class SSITelemetry {
// reference point, but the tracer initialization point is more relevant, as we couldn't be
// collecting profiles earlier anyway. The difference is not particularly significant if the
// tracer is initialized early in the process lifetime.
- this.startTime = performance.now()
+ setTimeout(() => {
+ this.shortLived = false
+ this._maybeTriggered()
+ }, this.longLivedThreshold).unref()
this._onSpanCreated = this._onSpanCreated.bind(this)
this._onProfileSubmitted = this._onProfileSubmitted.bind(this)
@@ -97,8 +96,31 @@ class SSITelemetry {
}
}
+ onTriggered (callback) {
+ switch (typeof callback) {
+ case 'undefined':
+ case 'function':
+ this.triggeredCallback = callback
+ process.nextTick(() => {
+ this._maybeTriggered()
+ })
+ break
+ default:
+ throw new TypeError('callback must be a function or undefined')
+ }
+ }
+
+ _maybeTriggered () {
+ if (!this.shortLived && !this.noSpan) {
+ if (typeof this.triggeredCallback === 'function') {
+ this.triggeredCallback.call(null)
+ }
+ }
+ }
+
_onSpanCreated () {
this.noSpan = false
+ this._maybeTriggered()
dc.unsubscribe('dd-trace:span:start', this._onSpanCreated)
}
@@ -121,7 +143,7 @@ class SSITelemetry {
if (this.noSpan) {
decision.push('no_span')
}
- if (performance.now() - this.startTime < this.shortLivedThreshold) {
+ if (this.shortLived) {
decision.push('short_lived')
}
if (decision.length === 0) {
@@ -138,8 +160,14 @@ class SSITelemetry {
this._profileCount = profilersNamespace.count('ssi_heuristic.number_of_profiles', tags)
this._runtimeIdCount = profilersNamespace.count('ssi_heuristic.number_of_runtime_id', tags)
- if (!this._emittedRuntimeId && decision[0] === 'triggered') {
- // Tags won't change anymore, so we can emit the runtime ID metric now
+ if (
+ !this._emittedRuntimeId &&
+ decision[0] === 'triggered' &&
+ // When enablement choice is SSI_ENABLED, hasSentProfiles can transition from false to true when the
+ // profiler gets started and the first profile is submitted, so we have to wait for it.
+ (this.enablementChoice !== EnablementChoice.SSI_ENABLED || this.hasSentProfiles)
+ ) {
+ // Tags won't change anymore, so we can emit the runtime ID metric now.
this._emittedRuntimeId = true
this._runtimeIdCount.inc()
}
@@ -164,4 +192,4 @@ class SSITelemetry {
}
}
-module.exports = { SSITelemetry, EnablementChoice }
+module.exports = { SSIHeuristics, EnablementChoice }
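
A usage sketch mirroring how proxy.js consumes this class below: the callback fires only once the process is both long-lived (the unref'd threshold timer) and has created at least one span. The module path is assumed; the config fields are the ones introduced in this diff:

    const { SSIHeuristics } = require('./packages/dd-trace/src/profiling/ssi-heuristics')

    const heuristics = new SSIHeuristics({ ssi: true, heuristicsEnabled: true })
    heuristics.start()
    heuristics.onTriggered(() => {
      // start the real profiler here, then clear the callback
      heuristics.onTriggered()
    })
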
diff --git a/packages/dd-trace/src/profiling/ssi-telemetry-mock-profiler.js b/packages/dd-trace/src/profiling/ssi-telemetry-mock-profiler.js
index 2a164e0851c..564046e383b 100644
--- a/packages/dd-trace/src/profiling/ssi-telemetry-mock-profiler.js
+++ b/packages/dd-trace/src/profiling/ssi-telemetry-mock-profiler.js
@@ -12,16 +12,11 @@ module.exports = {
// Copied from packages/dd-trace/src/profiler.js
const flushInterval = coalesce(config.interval, Number(DD_PROFILING_UPLOAD_PERIOD) * 1000, 65 * 1000)
- function scheduleProfileSubmit () {
- timerId = setTimeout(emitProfileSubmit, flushInterval)
- }
-
- function emitProfileSubmit () {
+ timerId = setTimeout(() => {
profileSubmittedChannel.publish()
- scheduleProfileSubmit()
- }
-
- scheduleProfileSubmit()
+ timerId.refresh()
+ }, flushInterval)
+ timerId.unref()
},
stop: () => {
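
The rewrite above replaces the recursive re-scheduling with a single Timeout that re-arms itself; the pattern in isolation:

    const timerId = setTimeout(() => {
      console.log('tick')
      timerId.refresh() // re-arm the same Timeout for the next interval
    }, 1000)
    timerId.unref() // don't keep the process alive just for this timer
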
diff --git a/packages/dd-trace/src/proxy.js b/packages/dd-trace/src/proxy.js
index 7d47b059142..f7b1c9e26eb 100644
--- a/packages/dd-trace/src/proxy.js
+++ b/packages/dd-trace/src/proxy.js
@@ -13,7 +13,7 @@ const AppsecSdk = require('./appsec/sdk')
const dogstatsd = require('./dogstatsd')
const NoopDogStatsDClient = require('./noop/dogstatsd')
const spanleak = require('./spanleak')
-const { SSITelemetry } = require('./profiling/ssi-telemetry')
+const { SSIHeuristics } = require('./profiling/ssi-heuristics')
const telemetryLog = require('dc-polyfill').channel('datadog:telemetry:log')
class LazyModule {
@@ -40,6 +40,7 @@ class Tracer extends NoopProxy {
this._pluginManager = new PluginManager(this)
this.dogstatsd = new NoopDogStatsDClient()
this._tracingInitialized = false
+ this._flare = new LazyModule(() => require('./flare'))
// these requires must work with esm bundler
this._modules = {
@@ -90,29 +91,46 @@ class Tracer extends NoopProxy {
}
this._enableOrDisableTracing(config)
})
+
+ rc.on('AGENT_CONFIG', (action, conf) => {
+ if (!conf?.name?.startsWith('flare-log-level.')) return
+
+ if (action === 'unapply') {
+ this._flare.disable()
+ } else if (conf.config?.log_level) {
+ this._flare.enable(config)
+ this._flare.module.prepare(conf.config.log_level)
+ }
+ })
+
+ rc.on('AGENT_TASK', (action, conf) => {
+ if (action === 'unapply' || !conf) return
+ if (conf.task_type !== 'tracer_flare' || !conf.args) return
+
+ this._flare.enable(config)
+ this._flare.module.send(conf.args)
+ })
}
if (config.isGCPFunction || config.isAzureFunction) {
require('./serverless').maybeStartServerlessMiniAgent(config)
}
- const ssiTelemetry = new SSITelemetry()
- ssiTelemetry.start()
+ const ssiHeuristics = new SSIHeuristics(config.profiling)
+ ssiHeuristics.start()
if (config.profiling.enabled) {
- // do not stop tracer initialization if the profiler fails to be imported
- try {
- const profiler = require('./profiler')
- this._profilerStarted = profiler.start(config)
- } catch (e) {
- log.error(e)
- telemetryLog.publish({
- message: e.message,
- level: 'ERROR',
- stack_trace: e.stack
+ this._profilerStarted = this._startProfiler(config)
+ } else if (config.profiling.ssi) {
+ const mockProfiler = require('./profiling/ssi-telemetry-mock-profiler')
+ mockProfiler.start(config)
+
+ if (config.profiling.heuristicsEnabled) {
+ ssiHeuristics.onTriggered(() => {
+ mockProfiler.stop()
+ this._startProfiler(config)
+ ssiHeuristics.onTriggered()
})
}
- } else if (ssiTelemetry.enabled()) {
- require('./profiling/ssi-telemetry-mock-profiler').start(config)
}
if (!this._profilerStarted) {
this._profilerStarted = Promise.resolve(false)
@@ -138,6 +156,22 @@ class Tracer extends NoopProxy {
return this
}
+ _startProfiler (config) {
+ // do not stop tracer initialization if the profiler fails to be imported
+ try {
+ return require('./profiler').start(config)
+ } catch (e) {
+ log.error(e)
+ if (telemetryLog.hasSubscribers) {
+ telemetryLog.publish({
+ message: e.message,
+ level: 'ERROR',
+ stack_trace: e.stack
+ })
+ }
+ }
+ }
+
_enableOrDisableTracing (config) {
if (config.tracing !== false) {
if (config.appsec.enabled) {
diff --git a/packages/dd-trace/src/ritm.js b/packages/dd-trace/src/ritm.js
index 509e9ad732e..882e1509cdf 100644
--- a/packages/dd-trace/src/ritm.js
+++ b/packages/dd-trace/src/ritm.js
@@ -50,8 +50,20 @@ function Hook (modules, options, onrequire) {
if (patchedRequire) return
+ const _origRequire = Module.prototype.require
patchedRequire = Module.prototype.require = function (request) {
- const filename = Module._resolveFilename(request, this)
+ /*
+ If resolving the filename for a `require(...)` fails, defer to the wrapped
+ require implementation rather than failing right away. This allows a
+ possibly monkey patched `require` to work.
+ */
+ let filename
+ try {
+ filename = Module._resolveFilename(request, this)
+ } catch (resolveErr) {
+ return _origRequire.apply(this, arguments)
+ }
+
const core = filename.indexOf(path.sep) === -1
let name, basedir, hooks
// return known patched modules immediately
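
What the new guard tolerates, sketched: a bundler or test tool may monkey patch require to serve virtual modules that Module._resolveFilename cannot resolve, and those now fall through to the underlying implementation instead of throwing. The module name below is hypothetical:

    const Module = require('module')

    const orig = Module.prototype.require
    Module.prototype.require = function (request) {
      if (request === 'virtual:config') return { virtual: true } // hypothetical module
      return orig.apply(this, arguments)
    }

    console.log(require('virtual:config')) // resolvable only via the patch
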
diff --git a/packages/dd-trace/src/startup-log.js b/packages/dd-trace/src/startup-log.js
index af3aa858476..12086ae1168 100644
--- a/packages/dd-trace/src/startup-log.js
+++ b/packages/dd-trace/src/startup-log.js
@@ -37,6 +37,23 @@ function startupLog ({ agentError } = {}) {
return
}
+ const out = tracerInfo({ agentError })
+
+ if (agentError) {
+ out.agent_error = agentError.message
+ }
+
+ info('DATADOG TRACER CONFIGURATION - ' + out)
+ if (agentError) {
+ warn('DATADOG TRACER DIAGNOSTIC - Agent Error: ' + agentError.message)
+ errors.agentError = {
+ code: agentError.code ? agentError.code : '',
+ message: `Agent Error:${agentError.message}`
+ }
+ }
+}
+
+function tracerInfo () {
const url = config.url || `http://${config.hostname || 'localhost'}:${config.port}`
const out = {
@@ -59,9 +76,6 @@ function startupLog ({ agentError } = {}) {
out.enabled = config.enabled
out.service = config.service
out.agent_url = url
- if (agentError) {
- out.agent_error = agentError.message
- }
out.debug = !!config.debug
out.sample_rate = config.sampler.sampleRate
out.sampling_rules = samplingRules
@@ -87,18 +101,7 @@ function startupLog ({ agentError } = {}) {
// out.service_mapping
// out.service_mapping_error
- info('DATADOG TRACER CONFIGURATION - ' + out)
- if (agentError) {
- warn('DATADOG TRACER DIAGNOSTIC - Agent Error: ' + agentError.message)
- errors.agentError = {
- code: agentError.code ? agentError.code : '',
- message: `Agent Error:${agentError.message}`
- }
- }
-
- config = undefined
- pluginManager = undefined
- samplingRules = undefined
+ return out
}
function setStartupLogConfig (aConfig) {
@@ -118,5 +121,6 @@ module.exports = {
setStartupLogConfig,
setStartupLogPluginManager,
setSamplingRules,
+ tracerInfo,
errors
}
diff --git a/packages/dd-trace/src/telemetry/index.js b/packages/dd-trace/src/telemetry/index.js
index 312f58e3666..dea883ffb12 100644
--- a/packages/dd-trace/src/telemetry/index.js
+++ b/packages/dd-trace/src/telemetry/index.js
@@ -313,7 +313,8 @@ function updateConfig (changes, config) {
sampleRate: 'DD_TRACE_SAMPLE_RATE',
logInjection: 'DD_LOG_INJECTION',
headerTags: 'DD_TRACE_HEADER_TAGS',
- tags: 'DD_TAGS'
+ tags: 'DD_TAGS',
+ 'sampler.rules': 'DD_TRACE_SAMPLING_RULES'
}
const namesNeedFormatting = new Set(['DD_TAGS', 'peerServiceMapping'])
@@ -328,9 +329,12 @@ function updateConfig (changes, config) {
const { origin, value } = change
const entry = { name, value, origin }
- if (Array.isArray(value)) entry.value = value.join(',')
if (namesNeedFormatting.has(entry.name)) entry.value = formatMapForTelemetry(entry.value)
if (entry.name === 'url' && entry.value) entry.value = entry.value.toString()
+ if (entry.name === 'DD_TRACE_SAMPLING_RULES') {
+ entry.value = JSON.stringify(entry.value)
+ }
+ if (Array.isArray(entry.value)) entry.value = entry.value.join(',')
configuration.push(entry)
}
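
How a sampling rules change now reaches telemetry, with an illustrative entry; note the stringify runs before the generic array join, which is why the ordering above matters:

    const entry = {
      name: 'DD_TRACE_SAMPLING_RULES',
      value: [{ service: 'web', sampleRate: 0.5 }],
      origin: 'env_var' // illustrative origin
    }
    entry.value = JSON.stringify(entry.value)
    console.log(entry.value) // '[{"service":"web","sampleRate":0.5}]'
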
diff --git a/packages/dd-trace/src/tracer.js b/packages/dd-trace/src/tracer.js
index 63c60e81440..5c36d0ee90e 100644
--- a/packages/dd-trace/src/tracer.js
+++ b/packages/dd-trace/src/tracer.js
@@ -11,6 +11,8 @@ const { DataStreamsProcessor } = require('./datastreams/processor')
const { DsmPathwayCodec } = require('./datastreams/pathway')
const { DD_MAJOR } = require('../../../version')
const DataStreamsContext = require('./data_streams_context')
+const { flushStartupLogs } = require('../../datadog-instrumentations/src/check_require_cache')
+const log = require('./log/writer')
const SPAN_TYPE = tags.SPAN_TYPE
const RESOURCE_NAME = tags.RESOURCE_NAME
@@ -23,6 +25,7 @@ class DatadogTracer extends Tracer {
this._dataStreamsProcessor = new DataStreamsProcessor(config)
this._scope = new Scope()
setStartupLogConfig(config)
+ flushStartupLogs(log)
}
configure ({ env, sampler }) {
diff --git a/packages/dd-trace/test/appsec/blocking.spec.js b/packages/dd-trace/test/appsec/blocking.spec.js
index 600ce92772c..a0d454a77c7 100644
--- a/packages/dd-trace/test/appsec/blocking.spec.js
+++ b/packages/dd-trace/test/appsec/blocking.spec.js
@@ -1,7 +1,5 @@
'use strict'
-const { AbortController } = require('node-abort-controller')
-
describe('blocking', () => {
const defaultBlockedTemplate = {
html: 'block test',
@@ -16,7 +14,7 @@ describe('blocking', () => {
}
let log
- let block, setTemplates, updateBlockingConfiguration
+ let block, setTemplates
let req, res, rootSpan
beforeEach(() => {
@@ -31,7 +29,6 @@ describe('blocking', () => {
block = blocking.block
setTemplates = blocking.setTemplates
- updateBlockingConfiguration = blocking.updateBlockingConfiguration
req = {
headers: {}
@@ -149,18 +146,14 @@ describe('blocking', () => {
}
it('should block with default html template and custom status', () => {
- updateBlockingConfiguration({
- id: 'block',
- type: 'block_request',
- parameters: {
- status_code: 401,
- type: 'auto'
- }
- })
+ const actionParameters = {
+ status_code: 401,
+ type: 'auto'
+ }
req.headers.accept = 'text/html'
setTemplates(config)
- block(req, res, rootSpan)
+ block(req, res, rootSpan, null, actionParameters)
expect(res.writeHead).to.have.been.calledOnceWith(401)
expect(res.end).to.have.been.calledOnceWithExactly(defaultBlockedTemplate.html)
@@ -168,18 +161,14 @@ describe('blocking', () => {
it('should block with default json template and custom status ' +
'when type is forced to json and accept is html', () => {
- updateBlockingConfiguration({
- id: 'block',
- type: 'block_request',
- parameters: {
- status_code: 401,
- type: 'json'
- }
- })
+ const actionParameters = {
+ status_code: 401,
+ type: 'json'
+ }
req.headers.accept = 'text/html'
setTemplates(config)
- block(req, res, rootSpan)
+ block(req, res, rootSpan, null, actionParameters)
expect(res.writeHead).to.have.been.calledOnceWith(401)
expect(res.end).to.have.been.calledOnceWithExactly(defaultBlockedTemplate.json)
@@ -187,35 +176,27 @@ describe('blocking', () => {
it('should block with default html template and custom status ' +
'when type is forced to html and accept is html', () => {
- updateBlockingConfiguration({
- id: 'block',
- type: 'block_request',
- parameters: {
- status_code: 401,
- type: 'html'
- }
- })
+ const actionParameters = {
+ status_code: 401,
+ type: 'html'
+ }
req.headers.accept = 'text/html'
setTemplates(config)
- block(req, res, rootSpan)
+ block(req, res, rootSpan, null, actionParameters)
expect(res.writeHead).to.have.been.calledOnceWith(401)
expect(res.end).to.have.been.calledOnceWithExactly(defaultBlockedTemplate.html)
})
it('should block with default json template and custom status', () => {
- updateBlockingConfiguration({
- id: 'block',
- type: 'block_request',
- parameters: {
- status_code: 401,
- type: 'auto'
- }
- })
+ const actionParameters = {
+ status_code: 401,
+ type: 'auto'
+ }
setTemplates(config)
- block(req, res, rootSpan)
+ block(req, res, rootSpan, null, actionParameters)
expect(res.writeHead).to.have.been.calledOnceWith(401)
expect(res.end).to.have.been.calledOnceWithExactly(defaultBlockedTemplate.json)
@@ -223,17 +204,13 @@ describe('blocking', () => {
it('should block with default json template and custom status ' +
'when type is forced to json and accept is not defined', () => {
- updateBlockingConfiguration({
- id: 'block',
- type: 'block_request',
- parameters: {
- status_code: 401,
- type: 'json'
- }
- })
+ const actionParameters = {
+ status_code: 401,
+ type: 'json'
+ }
setTemplates(config)
- block(req, res, rootSpan)
+ block(req, res, rootSpan, null, actionParameters)
expect(res.writeHead).to.have.been.calledOnceWith(401)
expect(res.end).to.have.been.calledOnceWithExactly(defaultBlockedTemplate.json)
@@ -241,34 +218,26 @@ describe('blocking', () => {
it('should block with default html template and custom status ' +
'when type is forced to html and accept is not defined', () => {
- updateBlockingConfiguration({
- id: 'block',
- type: 'block_request',
- parameters: {
- status_code: 401,
- type: 'html'
- }
- })
+ const actionParameters = {
+ status_code: 401,
+ type: 'html'
+ }
setTemplates(config)
- block(req, res, rootSpan)
+ block(req, res, rootSpan, null, actionParameters)
expect(res.writeHead).to.have.been.calledOnceWith(401)
expect(res.end).to.have.been.calledOnceWithExactly(defaultBlockedTemplate.html)
})
it('should block with custom redirect', () => {
- updateBlockingConfiguration({
- id: 'block',
- type: 'redirect_request',
- parameters: {
- status_code: 301,
- location: '/you-have-been-blocked'
- }
- })
+ const actionParameters = {
+ status_code: 301,
+ location: '/you-have-been-blocked'
+ }
setTemplates(config)
- block(req, res, rootSpan)
+ block(req, res, rootSpan, null, actionParameters)
expect(res.writeHead).to.have.been.calledOnceWithExactly(301, {
Location: '/you-have-been-blocked'
@@ -277,3 +246,41 @@ describe('blocking', () => {
})
})
})
+
+describe('waf actions', () => {
+ const blocking = require('../../src/appsec/blocking')
+
+ it('get block_request as blocking action', () => {
+ const blockRequestActionParameters = {
+ status_code: 401,
+ type: 'html'
+ }
+ const actions = {
+ block_request: blockRequestActionParameters
+ }
+ expect(blocking.getBlockingAction(actions)).to.be.deep.equal(blockRequestActionParameters)
+ })
+
+ it('get redirect_request as blocking action', () => {
+ const redirectRequestActionParameters = {
+ status_code: 301
+ }
+
+ const actions = {
+ redirect_request: redirectRequestActionParameters
+ }
+ expect(blocking.getBlockingAction(actions)).to.be.deep.equal(redirectRequestActionParameters)
+ })
+
+ it('get undefined when no actions', () => {
+ const actions = {}
+ expect(blocking.getBlockingAction(actions)).to.be.undefined
+ })
+
+ it('get undefined when generate_stack action', () => {
+ const actions = {
+ generate_stack: {}
+ }
+ expect(blocking.getBlockingAction(actions)).to.be.undefined
+ })
+})
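
The new 'waf actions' specs pin down the contract of getBlockingAction: it returns the parameters of a block_request or redirect_request action and ignores everything else, including generate_stack. A plausible sketch consistent with these assertions (not the actual implementation in src/appsec/blocking.js):

function getBlockingAction (actions) {
  if (!actions) return
  return actions.block_request || actions.redirect_request
}

console.log(getBlockingAction({ block_request: { status_code: 401, type: 'html' } })) // the parameters object
console.log(getBlockingAction({ generate_stack: {} })) // undefined
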
diff --git a/packages/dd-trace/test/appsec/graphql.spec.js b/packages/dd-trace/test/appsec/graphql.spec.js
index 827c7915d06..1f3fcec6cc2 100644
--- a/packages/dd-trace/test/appsec/graphql.spec.js
+++ b/packages/dd-trace/test/appsec/graphql.spec.js
@@ -213,9 +213,17 @@ describe('GraphQL', () => {
user: [{ id: '1234' }]
}
+ const blockParameters = {
+ status_code: '401',
+ type: 'auto',
+ grpc_status_code: '10'
+ }
+
const abortController = context.abortController
- sinon.stub(waf, 'run').returns(['block'])
+ sinon.stub(waf, 'run').returns({
+ block_request: blockParameters
+ })
sinon.stub(web, 'root').returns({})
startGraphqlResolve.publish({ context, resolverInfo })
@@ -231,7 +239,7 @@ describe('GraphQL', () => {
const abortData = {}
apolloChannel.asyncEnd.publish({ abortController, abortData })
- expect(blocking.getBlockingData).to.have.been.calledOnceWithExactly(req, 'graphql', {})
+ expect(blocking.getBlockingData).to.have.been.calledOnceWithExactly(req, 'graphql', {}, blockParameters)
})
})
})
diff --git a/packages/dd-trace/test/appsec/index.spec.js b/packages/dd-trace/test/appsec/index.spec.js
index febd54244a6..febac128f83 100644
--- a/packages/dd-trace/test/appsec/index.spec.js
+++ b/packages/dd-trace/test/appsec/index.spec.js
@@ -24,6 +24,14 @@ const { storage } = require('../../../datadog-core')
const telemetryMetrics = require('../../src/telemetry/metrics')
const addresses = require('../../src/appsec/addresses')
+const resultActions = {
+ block_request: {
+ status_code: '401',
+ type: 'auto',
+ grpc_status_code: '10'
+ }
+}
+
describe('AppSec Index', () => {
let config
let AppSec
@@ -34,6 +42,7 @@ describe('AppSec Index', () => {
let appsecTelemetry
let graphql
let apiSecuritySampler
+ let rasp
const RULES = { rules: [{ a: 1 }] }
@@ -55,6 +64,9 @@ describe('AppSec Index', () => {
apiSecurity: {
enabled: false,
requestSampling: 0
+ },
+ rasp: {
+ enabled: true
}
}
}
@@ -91,6 +103,11 @@ describe('AppSec Index', () => {
sinon.spy(apiSecuritySampler, 'sampleRequest')
sinon.spy(apiSecuritySampler, 'isSampled')
+ rasp = {
+ enable: sinon.stub(),
+ disable: sinon.stub()
+ }
+
AppSec = proxyquire('../../src/appsec', {
'../log': log,
'../plugins/util/web': web,
@@ -98,7 +115,8 @@ describe('AppSec Index', () => {
'./passport': passport,
'./telemetry': appsecTelemetry,
'./graphql': graphql,
- './api_security_sampler': apiSecuritySampler
+ './api_security_sampler': apiSecuritySampler,
+ './rasp': rasp
})
sinon.stub(fs, 'readFileSync').returns(JSON.stringify(RULES))
@@ -175,6 +193,19 @@ describe('AppSec Index', () => {
expect(appsecTelemetry.enable).to.be.calledOnceWithExactly(config.telemetry)
})
+
+ it('should call rasp enable', () => {
+ AppSec.enable(config)
+
+ expect(rasp.enable).to.be.calledOnceWithExactly()
+ })
+
+ it('should not call rasp enable when rasp is disabled', () => {
+ config.appsec.rasp.enabled = false
+ AppSec.enable(config)
+
+ expect(rasp.enable).to.not.be.called
+ })
})
describe('disable', () => {
@@ -196,6 +227,7 @@ describe('AppSec Index', () => {
.to.have.been.calledOnceWithExactly(AppSec.incomingHttpStartTranslator)
expect(incomingHttpRequestEnd.unsubscribe).to.have.been.calledOnceWithExactly(AppSec.incomingHttpEndTranslator)
expect(graphql.disable).to.have.been.calledOnceWithExactly()
+ expect(rasp.disable).to.have.been.calledOnceWithExactly()
})
it('should disable AppSec when DC channels are not active', () => {
@@ -584,7 +616,7 @@ describe('AppSec Index', () => {
expect(apiSecuritySampler.isSampled).to.have.been.calledOnceWith(req)
expect(waf.run).to.been.calledOnceWith({
persistent: {
- [addresses.HTTP_OUTGOING_BODY]: body
+ [addresses.HTTP_INCOMING_RESPONSE_BODY]: body
}
}, req)
})
@@ -662,7 +694,7 @@ describe('AppSec Index', () => {
it('Should block when it is detected as attack', () => {
const body = { key: 'value' }
req.body = body
- sinon.stub(waf, 'run').returns(['block'])
+ sinon.stub(waf, 'run').returns(resultActions)
bodyParser.publish({ req, res, body, abortController })
@@ -704,7 +736,7 @@ describe('AppSec Index', () => {
it('Should block when it is detected as attack', () => {
const cookies = { key: 'value' }
- sinon.stub(waf, 'run').returns(['block'])
+ sinon.stub(waf, 'run').returns(resultActions)
cookieParser.publish({ req, res, abortController, cookies })
@@ -748,7 +780,7 @@ describe('AppSec Index', () => {
it('Should block when it is detected as attack', () => {
const query = { key: 'value' }
req.query = query
- sinon.stub(waf, 'run').returns(['block'])
+ sinon.stub(waf, 'run').returns(resultActions)
queryParser.publish({ req, res, query, abortController })
diff --git a/packages/dd-trace/test/appsec/rasp.express.plugin.spec.js b/packages/dd-trace/test/appsec/rasp.express.plugin.spec.js
new file mode 100644
index 00000000000..249af2ae727
--- /dev/null
+++ b/packages/dd-trace/test/appsec/rasp.express.plugin.spec.js
@@ -0,0 +1,116 @@
+'use strict'
+
+const Axios = require('axios')
+const agent = require('../plugins/agent')
+const getPort = require('get-port')
+const appsec = require('../../src/appsec')
+const Config = require('../../src/config')
+const path = require('path')
+const { assert } = require('chai')
+
+withVersions('express', 'express', expressVersion => {
+ describe('RASP', () => {
+ let app, server, port, axios
+
+ before(() => {
+ return agent.load(['http'], { client: false })
+ })
+
+ before((done) => {
+ const express = require(`../../../../versions/express@${expressVersion}`).get()
+ const expressApp = express()
+
+ expressApp.get('/', (req, res) => {
+ app(req, res)
+ })
+
+ appsec.enable(new Config({
+ appsec: {
+ enabled: true,
+ rules: path.join(__dirname, 'rasp_rules.json'),
+ rasp: { enabled: true }
+ }
+ }))
+
+ getPort().then(newPort => {
+ port = newPort
+ axios = Axios.create({
+ baseURL: `http://localhost:${port}`
+ })
+ server = expressApp.listen(port, () => {
+ done()
+ })
+ })
+ })
+
+ after(() => {
+ appsec.disable()
+ server.close()
+ return agent.close({ ritmReset: false })
+ })
+
+ function getWebSpan (traces) {
+ for (const trace of traces) {
+ for (const span of trace) {
+ if (span.type === 'web') {
+ return span
+ }
+ }
+ }
+ throw new Error('web span not found')
+ }
+
+ describe('ssrf', () => {
+ ['http', 'https'].forEach(protocol => {
+ describe(`Test using ${protocol}`, () => {
+ it('Should not detect threat', async () => {
+ app = (req, res) => {
+ require(protocol).get(`${protocol}://${req.query.host}`)
+ res.end('end')
+ }
+
+ axios.get('/?host=www.datadoghq.com')
+
+ await agent.use((traces) => {
+ const span = getWebSpan(traces)
+ assert.notProperty(span.meta, '_dd.appsec.json')
+ })
+ })
+
+ it('Should detect threat doing a GET request', async () => {
+ app = (req, res) => {
+ require(protocol).get(`${protocol}://${req.query.host}`)
+ res.end('end')
+ }
+
+ axios.get('/?host=ifconfig.pro')
+
+ await agent.use((traces) => {
+ const span = getWebSpan(traces)
+ assert.property(span.meta, '_dd.appsec.json')
+ assert(span.meta['_dd.appsec.json'].includes('rasp-ssrf-rule-id-1'))
+ })
+ })
+
+ it('Should detect threat doing a POST request', async () => {
+ app = (req, res) => {
+ const clientRequest = require(protocol)
+ .request(`${protocol}://${req.query.host}`, { method: 'POST' })
+ clientRequest.write('dummy_post_data')
+ clientRequest.end()
+ res.end('end')
+ }
+
+ axios.get('/?host=ifconfig.pro')
+
+ await agent.use((traces) => {
+ const span = getWebSpan(traces)
+ assert.property(span.meta, '_dd.appsec.json')
+ assert(span.meta['_dd.appsec.json'].includes('rasp-ssrf-rule-id-1'))
+ })
+ })
+ })
+ })
+ })
+ })
+})
diff --git a/packages/dd-trace/test/appsec/rasp.spec.js b/packages/dd-trace/test/appsec/rasp.spec.js
new file mode 100644
index 00000000000..7f7d6dc4c50
--- /dev/null
+++ b/packages/dd-trace/test/appsec/rasp.spec.js
@@ -0,0 +1,99 @@
+'use strict'
+
+const proxyquire = require('proxyquire')
+const { httpClientRequestStart } = require('../../src/appsec/channels')
+const addresses = require('../../src/appsec/addresses')
+
+describe('RASP', () => {
+ let waf, rasp, datadogCore
+ beforeEach(() => {
+ datadogCore = {
+ storage: {
+ getStore: sinon.stub()
+ }
+ }
+ waf = {
+ run: sinon.stub()
+ }
+
+ rasp = proxyquire('../../src/appsec/rasp', {
+ '../../../datadog-core': datadogCore,
+ './waf': waf
+ })
+
+ rasp.enable()
+ })
+
+ afterEach(() => {
+ rasp.disable()
+ })
+
+ describe('analyzeSsrf', () => {
+ it('should analyze ssrf', () => {
+ const ctx = {
+ args: {
+ uri: 'http://example.com'
+ }
+ }
+ const req = {}
+ datadogCore.storage.getStore.returns({ req })
+
+ httpClientRequestStart.publish(ctx)
+
+ const persistent = { [addresses.HTTP_OUTGOING_URL]: 'http://example.com' }
+ sinon.assert.calledOnceWithExactly(waf.run, { persistent }, req)
+ })
+
+ it('should not analyze ssrf if rasp is disabled', () => {
+ rasp.disable()
+ const ctx = {
+ args: {
+ uri: 'http://example.com'
+ }
+ }
+ const req = {}
+ datadogCore.storage.getStore.returns({ req })
+
+ httpClientRequestStart.publish(ctx)
+
+ sinon.assert.notCalled(waf.run)
+ })
+
+ it('should not analyze ssrf if no store', () => {
+ const ctx = {
+ args: {
+ uri: 'http://example.com'
+ }
+ }
+ datadogCore.storage.getStore.returns(undefined)
+
+ httpClientRequestStart.publish(ctx)
+
+ sinon.assert.notCalled(waf.run)
+ })
+
+ it('should not analyze ssrf if no req', () => {
+ const ctx = {
+ args: {
+ uri: 'http://example.com'
+ }
+ }
+ datadogCore.storage.getStore.returns({})
+
+ httpClientRequestStart.publish(ctx)
+
+ sinon.assert.notCalled(waf.run)
+ })
+
+ it('should not analyze ssrf if no url', () => {
+ const ctx = {
+ args: {}
+ }
+ datadogCore.storage.getStore.returns({})
+
+ httpClientRequestStart.publish(ctx)
+
+ sinon.assert.notCalled(waf.run)
+ })
+ })
+})
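
These unit tests document the shape of the RASP SSRF hook: on the httpClientRequestStart channel it reads the request off async storage and, only when both a request and a URL are present, feeds the outgoing URL to the WAF. A hedged sketch of that subscriber, using the same module paths as the spec (the real code lives in src/appsec/rasp.js):

const { storage } = require('../../../datadog-core')
const addresses = require('../../src/appsec/addresses')
const waf = require('../../src/appsec/waf')
const { httpClientRequestStart } = require('../../src/appsec/channels')

function analyzeSsrf (ctx) {
  const store = storage.getStore()
  const req = store && store.req
  const url = ctx.args.uri
  if (!req || !url) return // mirrors the 'no store' / 'no req' / 'no url' specs
  waf.run({ persistent: { [addresses.HTTP_OUTGOING_URL]: url } }, req)
}

const enable = () => httpClientRequestStart.subscribe(analyzeSsrf)
const disable = () => httpClientRequestStart.unsubscribe(analyzeSsrf)
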
diff --git a/packages/dd-trace/test/appsec/rasp_rules.json b/packages/dd-trace/test/appsec/rasp_rules.json
new file mode 100644
index 00000000000..7b01675dcaa
--- /dev/null
+++ b/packages/dd-trace/test/appsec/rasp_rules.json
@@ -0,0 +1,58 @@
+{
+ "version": "2.2",
+ "metadata": {
+ "rules_version": "1.99.0"
+ },
+ "rules": [
+ {
+ "id": "rasp-ssrf-rule-id-1",
+ "name": "Server-side request forgery exploit",
+ "enabled": true,
+ "tags": {
+ "type": "ssrf",
+ "category": "vulnerability_trigger",
+ "cwe": "918",
+ "capec": "1000/225/115/664",
+ "confidence": "0",
+ "module": "rasp"
+ },
+ "conditions": [
+ {
+ "parameters": {
+ "resource": [
+ {
+ "address": "server.io.net.url"
+ }
+ ],
+ "params": [
+ {
+ "address": "server.request.query"
+ },
+ {
+ "address": "server.request.body"
+ },
+ {
+ "address": "server.request.path_params"
+ },
+ {
+ "address": "grpc.server.request.message"
+ },
+ {
+ "address": "graphql.server.all_resolvers"
+ },
+ {
+ "address": "graphql.server.resolver"
+ }
+ ]
+ },
+ "operator": "ssrf_detector"
+ }
+ ],
+ "transformers": [],
+ "on_match": [
+ "block",
+ "stack_trace"
+ ]
+ }
+ ]
+}
diff --git a/packages/dd-trace/test/appsec/rule_manager.spec.js b/packages/dd-trace/test/appsec/rule_manager.spec.js
index e7daca53341..7340213e16d 100644
--- a/packages/dd-trace/test/appsec/rule_manager.spec.js
+++ b/packages/dd-trace/test/appsec/rule_manager.spec.js
@@ -1,14 +1,11 @@
'use strict'
-const fs = require('fs')
-const path = require('path')
const { loadRules, clearAllRules, updateWafFromRC } = require('../../src/appsec/rule_manager')
const Config = require('../../src/config')
const { ACKNOWLEDGED } = require('../../src/appsec/remote_config/apply_states')
const rules = require('../../src/appsec/recommended.json')
const waf = require('../../src/appsec/waf')
-const blocking = require('../../src/appsec/blocking')
describe('AppSec Rule Manager', () => {
let config
@@ -20,8 +17,6 @@ describe('AppSec Rule Manager', () => {
sinon.stub(waf, 'init').callThrough()
sinon.stub(waf, 'destroy').callThrough()
sinon.stub(waf, 'update').callThrough()
-
- sinon.stub(blocking, 'updateBlockingConfiguration').callThrough()
})
afterEach(() => {
@@ -34,22 +29,6 @@ describe('AppSec Rule Manager', () => {
loadRules(config.appsec)
expect(waf.init).to.have.been.calledOnceWithExactly(rules, config.appsec)
- expect(blocking.updateBlockingConfiguration).not.to.have.been.called
- })
-
- it('should call updateBlockingConfiguration with proper params', () => {
- const rulesPath = path.join(__dirname, './blocking-actions-rules.json')
- const testRules = JSON.parse(fs.readFileSync(rulesPath))
-
- config.appsec.rules = rulesPath
-
- loadRules(config.appsec)
-
- expect(waf.init).to.have.been.calledOnceWithExactly(testRules, config.appsec)
- expect(blocking.updateBlockingConfiguration).to.have.been.calledOnceWithExactly({
- id: 'block',
- otherParam: 'other'
- })
})
it('should throw if null/undefined are passed', () => {
@@ -69,7 +48,6 @@ describe('AppSec Rule Manager', () => {
clearAllRules()
expect(waf.destroy).to.have.been.calledOnce
- expect(blocking.updateBlockingConfiguration).to.have.been.calledOnceWithExactly(undefined)
})
})
@@ -527,13 +505,7 @@ describe('AppSec Rule Manager', () => {
]
updateWafFromRC({ toUnapply: [], toApply, toModify: [] })
-
- expect(waf.update).not.to.have.been.called
- expect(blocking.updateBlockingConfiguration).to.have.been.calledOnceWithExactly(
- {
- id: 'block',
- otherParam: 'other'
- })
+ expect(waf.update).to.have.been.calledOnceWithExactly(asm)
})
it('should unapply blocking actions', () => {
@@ -557,8 +529,6 @@ describe('AppSec Rule Manager', () => {
}
]
updateWafFromRC({ toUnapply: [], toApply, toModify: [] })
- // reset counters
- blocking.updateBlockingConfiguration.reset()
const toUnapply = [
{
@@ -569,8 +539,7 @@ describe('AppSec Rule Manager', () => {
updateWafFromRC({ toUnapply, toApply: [], toModify: [] })
- expect(waf.update).not.to.have.been.called
- expect(blocking.updateBlockingConfiguration).to.have.been.calledOnceWithExactly(undefined)
+ expect(waf.update).to.have.been.calledOnceWithExactly(asm)
})
it('should ignore other properties', () => {
diff --git a/packages/dd-trace/test/appsec/sdk/user_blocking.spec.js b/packages/dd-trace/test/appsec/sdk/user_blocking.spec.js
index 3072b57122b..04d3da4647d 100644
--- a/packages/dd-trace/test/appsec/sdk/user_blocking.spec.js
+++ b/packages/dd-trace/test/appsec/sdk/user_blocking.spec.js
@@ -11,6 +11,14 @@ const path = require('path')
const waf = require('../../../src/appsec/waf')
const { USER_ID } = require('../../../src/appsec/addresses')
+const resultActions = {
+ block_request: {
+ status_code: '401',
+ type: 'auto',
+ grpc_status_code: '10'
+ }
+}
+
describe('user_blocking', () => {
describe('Internal API', () => {
const req = { protocol: 'https' }
@@ -21,8 +29,8 @@ describe('user_blocking', () => {
before(() => {
const runStub = sinon.stub(waf, 'run')
- runStub.withArgs({ persistent: { [USER_ID]: 'user' } }).returns(['block'])
- runStub.withArgs({ persistent: { [USER_ID]: 'gooduser' } }).returns([''])
+ runStub.withArgs({ persistent: { [USER_ID]: 'user' } }).returns(resultActions)
+ runStub.withArgs({ persistent: { [USER_ID]: 'gooduser' } }).returns({})
})
beforeEach(() => {
diff --git a/packages/dd-trace/test/ci-visibility/exporters/test-worker/exporter.spec.js b/packages/dd-trace/test/ci-visibility/exporters/test-worker/exporter.spec.js
index cb212eca891..3322fbb8e85 100644
--- a/packages/dd-trace/test/ci-visibility/exporters/test-worker/exporter.spec.js
+++ b/packages/dd-trace/test/ci-visibility/exporters/test-worker/exporter.spec.js
@@ -6,19 +6,23 @@ const TestWorkerCiVisibilityExporter = require('../../../../src/ci-visibility/ex
const {
JEST_WORKER_TRACE_PAYLOAD_CODE,
JEST_WORKER_COVERAGE_PAYLOAD_CODE,
- CUCUMBER_WORKER_TRACE_PAYLOAD_CODE
+ CUCUMBER_WORKER_TRACE_PAYLOAD_CODE,
+ MOCHA_WORKER_TRACE_PAYLOAD_CODE
} = require('../../../../src/plugins/util/test')
describe('CI Visibility Test Worker Exporter', () => {
let send, originalSend
+
beforeEach(() => {
send = sinon.spy()
originalSend = process.send
process.send = send
})
+
afterEach(() => {
process.send = originalSend
})
+
context('when the process is a jest worker', () => {
beforeEach(() => {
process.env.JEST_WORKER_ID = '1'
@@ -26,6 +30,7 @@ describe('CI Visibility Test Worker Exporter', () => {
afterEach(() => {
delete process.env.JEST_WORKER_ID
})
+
it('can export traces', () => {
const trace = [{ type: 'test' }]
const traceSecond = [{ type: 'test', name: 'other' }]
@@ -35,6 +40,7 @@ describe('CI Visibility Test Worker Exporter', () => {
jestWorkerExporter.flush()
expect(send).to.have.been.calledWith([JEST_WORKER_TRACE_PAYLOAD_CODE, JSON.stringify([trace, traceSecond])])
})
+
it('can export coverages', () => {
const coverage = { sessionId: '1', suiteId: '1', files: ['test.js'] }
const coverageSecond = { sessionId: '2', suiteId: '2', files: ['test2.js'] }
@@ -46,6 +52,7 @@ describe('CI Visibility Test Worker Exporter', () => {
[JEST_WORKER_COVERAGE_PAYLOAD_CODE, JSON.stringify([coverage, coverageSecond])]
)
})
+
it('does not break if process.send is undefined', () => {
delete process.send
const trace = [{ type: 'test' }]
@@ -55,6 +62,7 @@ describe('CI Visibility Test Worker Exporter', () => {
expect(send).not.to.have.been.called
})
})
+
context('when the process is a cucumber worker', () => {
beforeEach(() => {
process.env.CUCUMBER_WORKER_ID = '1'
@@ -62,6 +70,7 @@ describe('CI Visibility Test Worker Exporter', () => {
afterEach(() => {
delete process.env.CUCUMBER_WORKER_ID
})
+
it('can export traces', () => {
const trace = [{ type: 'test' }]
const traceSecond = [{ type: 'test', name: 'other' }]
@@ -71,6 +80,7 @@ describe('CI Visibility Test Worker Exporter', () => {
cucumberWorkerExporter.flush()
expect(send).to.have.been.calledWith([CUCUMBER_WORKER_TRACE_PAYLOAD_CODE, JSON.stringify([trace, traceSecond])])
})
+
it('does not break if process.send is undefined', () => {
delete process.send
const trace = [{ type: 'test' }]
@@ -80,4 +90,32 @@ describe('CI Visibility Test Worker Exporter', () => {
expect(send).not.to.have.been.called
})
})
+
+ context('when the process is a mocha worker', () => {
+ beforeEach(() => {
+ process.env.MOCHA_WORKER_ID = '1'
+ })
+ afterEach(() => {
+ delete process.env.MOCHA_WORKER_ID
+ })
+
+ it('can export traces', () => {
+ const trace = [{ type: 'test' }]
+ const traceSecond = [{ type: 'test', name: 'other' }]
+ const mochaWorkerExporter = new TestWorkerCiVisibilityExporter()
+ mochaWorkerExporter.export(trace)
+ mochaWorkerExporter.export(traceSecond)
+ mochaWorkerExporter.flush()
+ expect(send).to.have.been.calledWith([MOCHA_WORKER_TRACE_PAYLOAD_CODE, JSON.stringify([trace, traceSecond])])
+ })
+
+ it('does not break if process.send is undefined', () => {
+ delete process.send
+ const trace = [{ type: 'test' }]
+ const mochaWorkerExporter = new TestWorkerCiVisibilityExporter()
+ mochaWorkerExporter.export(trace)
+ mochaWorkerExporter.flush()
+ expect(send).not.to.have.been.called
+ })
+ })
})
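
The new mocha-worker context relies on the same exporter mechanics as the jest and cucumber ones: traces are buffered on export and flushed through process.send as a [payloadCode, JSON.stringify(traces)] tuple, with a guard for workers that have no IPC channel. A simplified sketch of that contract (the real class is TestWorkerCiVisibilityExporter):

class WorkerExporterSketch {
  constructor (payloadCode) {
    this._payloadCode = payloadCode // e.g. MOCHA_WORKER_TRACE_PAYLOAD_CODE
    this._traces = []
  }

  export (trace) {
    this._traces.push(trace)
  }

  flush () {
    if (typeof process.send !== 'function') return // the 'does not break' specs
    process.send([this._payloadCode, JSON.stringify(this._traces)])
  }
}
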
diff --git a/packages/dd-trace/test/config.spec.js b/packages/dd-trace/test/config.spec.js
index 69bae6a4ec5..c19a8405515 100644
--- a/packages/dd-trace/test/config.spec.js
+++ b/packages/dd-trace/test/config.spec.js
@@ -77,6 +77,7 @@ describe('Config', () => {
process.env.DD_SERVICE = 'service'
process.env.OTEL_SERVICE_NAME = 'otel_service'
process.env.DD_TRACE_LOG_LEVEL = 'error'
+ process.env.DD_TRACE_DEBUG = 'false'
process.env.OTEL_LOG_LEVEL = 'debug'
process.env.DD_TRACE_SAMPLE_RATE = '0.5'
process.env.OTEL_TRACES_SAMPLER = 'traceidratio'
@@ -93,6 +94,7 @@ describe('Config', () => {
const config = new Config()
+ expect(config).to.have.property('debug', false)
expect(config).to.have.property('service', 'service')
expect(config).to.have.property('logLevel', 'error')
expect(config).to.have.property('sampleRate', 0.5)
@@ -109,7 +111,7 @@ describe('Config', () => {
it('should initialize with OTEL environment variables when DD env vars are not set', () => {
process.env.OTEL_SERVICE_NAME = 'otel_service'
- process.env.OTEL_LOG_LEVEL = 'warn'
+ process.env.OTEL_LOG_LEVEL = 'debug'
process.env.OTEL_TRACES_SAMPLER = 'traceidratio'
process.env.OTEL_TRACES_SAMPLER_ARG = '0.1'
process.env.OTEL_TRACES_EXPORTER = 'none'
@@ -119,8 +121,9 @@ describe('Config', () => {
const config = new Config()
+ expect(config).to.have.property('debug', true)
expect(config).to.have.property('service', 'otel_service')
- expect(config).to.have.property('logLevel', 'warn')
+ expect(config).to.have.property('logLevel', 'debug')
expect(config).to.have.property('sampleRate', 0.1)
expect(config).to.have.property('runtimeMetrics', false)
expect(config.tags).to.include({ foo: 'bar1', baz: 'qux1' })
@@ -208,6 +211,7 @@ describe('Config', () => {
expect(config).to.have.nested.property('experimental.enableGetRumData', false)
expect(config).to.have.nested.property('appsec.enabled', undefined)
expect(config).to.have.nested.property('appsec.rules', undefined)
+ expect(config).to.have.nested.property('appsec.rasp.enabled', false)
expect(config).to.have.nested.property('appsec.rateLimit', 100)
expect(config).to.have.nested.property('appsec.wafTimeout', 5e3)
expect(config).to.have.nested.property('appsec.obfuscatorKeyRegex').with.length(155)
@@ -249,6 +253,7 @@ describe('Config', () => {
value: '(?i)(?:p(?:ass)?w(?:or)?d|pass(?:_?phrase)?|secret|(?:api_?|private_?|public_?|access_?|secret_?)key(?:_?id)?|token|consumer_?(?:id|key|secret)|sign(?:ed|ature)?|auth(?:entication|orization)?)(?:\\s*=[^;]|"\\s*:\\s*"[^"]+")|bearer\\s+[a-z0-9\\._\\-]+|token:[a-z0-9]{13}|gh[opsu]_[0-9a-zA-Z]{36}|ey[I-L][\\w=-]+\\.ey[I-L][\\w=-]+(?:\\.[\\w.+\\/=-]+)?|[\\-]{5}BEGIN[a-z\\s]+PRIVATE\\sKEY[\\-]{5}[^\\-]+[\\-]{5}END[a-z\\s]+PRIVATE\\sKEY|ssh-rsa\\s*[a-z0-9\\/\\.+]{100,}',
origin: 'default'
},
+ { name: 'appsec.rasp.enabled', value: false, origin: 'default' },
{ name: 'appsec.rateLimit', value: 100, origin: 'default' },
{ name: 'appsec.rules', value: undefined, origin: 'default' },
{ name: 'appsec.sca.enabled', value: null, origin: 'default' },
@@ -290,9 +295,11 @@ describe('Config', () => {
{ name: 'peerServiceMapping', value: {}, origin: 'default' },
{ name: 'plugins', value: true, origin: 'default' },
{ name: 'port', value: '8126', origin: 'default' },
- { name: 'profiling.enabled', value: false, origin: 'default' },
+ { name: 'profiling.enabled', value: undefined, origin: 'default' },
{ name: 'profiling.exporters', value: 'agent', origin: 'default' },
+ { name: 'profiling.heuristicsEnabled', value: false, origin: 'default' },
{ name: 'profiling.sourceMap', value: true, origin: 'default' },
+ { name: 'profiling.ssi', value: false, origin: 'default' },
{ name: 'protocolVersion', value: '0.4', origin: 'default' },
{
name: 'queryStringObfuscation',
@@ -307,6 +314,7 @@ describe('Config', () => {
{ name: 'runtimeMetrics', value: false, origin: 'default' },
{ name: 'sampleRate', value: undefined, origin: 'default' },
{ name: 'sampler.rateLimit', value: undefined, origin: 'default' },
+ { name: 'sampler.rules', value: [], origin: 'default' },
{ name: 'scope', value: undefined, origin: 'default' },
{ name: 'service', value: 'node', origin: 'default' },
{ name: 'site', value: 'datadoghq.com', origin: 'default' },
@@ -411,6 +419,7 @@ describe('Config', () => {
process.env.DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED = 'true'
process.env.DD_APPSEC_ENABLED = 'true'
+ process.env.DD_APPSEC_RASP_ENABLED = 'true'
process.env.DD_APPSEC_RULES = RULES_JSON_PATH
process.env.DD_APPSEC_TRACE_RATE_LIMIT = '42'
process.env.DD_APPSEC_WAF_TIMEOUT = '42'
@@ -434,7 +443,8 @@ describe('Config', () => {
process.env.DD_IAST_TELEMETRY_VERBOSITY = 'DEBUG'
process.env.DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED = 'true'
process.env.DD_TRACE_128_BIT_TRACEID_LOGGING_ENABLED = 'true'
- process.env.DD_EXPERIMENTAL_PROFILING_ENABLED = 'true'
+ process.env.DD_PROFILING_ENABLED = 'true'
+ process.env.DD_INJECTION_ENABLED = 'profiler'
process.env.DD_API_SECURITY_ENABLED = 'true'
process.env.DD_API_SECURITY_REQUEST_SAMPLE_RATE = 1
process.env.DD_INSTRUMENTATION_INSTALL_ID = '68e75c48-57ca-4a12-adfc-575c4b05fcbe'
@@ -497,6 +507,7 @@ describe('Config', () => {
expect(config).to.have.nested.property('experimental.exporter', 'log')
expect(config).to.have.nested.property('experimental.enableGetRumData', true)
expect(config).to.have.nested.property('appsec.enabled', true)
+ expect(config).to.have.nested.property('appsec.rasp.enabled', true)
expect(config).to.have.nested.property('appsec.rules', RULES_JSON_PATH)
expect(config).to.have.nested.property('appsec.rateLimit', 42)
expect(config).to.have.nested.property('appsec.wafTimeout', 42)
@@ -535,6 +546,7 @@ describe('Config', () => {
{ name: 'appsec.enabled', value: true, origin: 'env_var' },
{ name: 'appsec.obfuscatorKeyRegex', value: '.*', origin: 'env_var' },
{ name: 'appsec.obfuscatorValueRegex', value: '.*', origin: 'env_var' },
+ { name: 'appsec.rasp.enabled', value: true, origin: 'env_var' },
{ name: 'appsec.rateLimit', value: 42, origin: 'env_var' },
{ name: 'appsec.rules', value: RULES_JSON_PATH, origin: 'env_var' },
{ name: 'appsec.sca.enabled', value: true, origin: 'env_var' },
@@ -562,6 +574,8 @@ describe('Config', () => {
{ name: 'peerServiceMapping', value: { c: 'cc', d: 'dd' }, origin: 'env_var' },
{ name: 'port', value: '6218', origin: 'env_var' },
{ name: 'profiling.enabled', value: true, origin: 'env_var' },
+ { name: 'profiling.heuristicsEnabled', value: true, origin: 'env_var' },
+ { name: 'profiling.ssi', value: true, origin: 'env_var' },
{ name: 'protocolVersion', value: '0.5', origin: 'env_var' },
{ name: 'queryStringObfuscation', value: '.*', origin: 'env_var' },
{ name: 'remoteConfig.enabled', value: false, origin: 'env_var' },
@@ -570,6 +584,16 @@ describe('Config', () => {
{ name: 'runtimeMetrics', value: true, origin: 'env_var' },
{ name: 'sampleRate', value: 0.5, origin: 'env_var' },
{ name: 'sampler.rateLimit', value: '-1', origin: 'env_var' },
+ {
+ name: 'sampler.rules',
+ value: [
+ { service: 'usersvc', name: 'healthcheck', sampleRate: 0.0 },
+ { service: 'usersvc', sampleRate: 0.5 },
+ { service: 'authsvc', sampleRate: 1.0 },
+ { sampleRate: 0.1 }
+ ],
+ origin: 'env_var'
+ },
{ name: 'service', value: 'service', origin: 'env_var' },
{ name: 'spanAttributeSchema', value: 'v1', origin: 'env_var' },
{ name: 'spanRemoveIntegrationFromService', value: true, origin: 'env_var' },
@@ -644,6 +668,12 @@ describe('Config', () => {
foo: 'bar'
}
const logLevel = 'error'
+ const samplingRules = [
+ { service: 'usersvc', name: 'healthcheck', sampleRate: 0.0 },
+ { service: 'usersvc', sampleRate: 0.5 },
+ { service: 'authsvc', sampleRate: 1.0 },
+ { sampleRate: 0.1 }
+ ]
const config = new Config({
enabled: false,
debug: true,
@@ -662,12 +692,7 @@ describe('Config', () => {
clientIpHeader: 'x-true-client-ip',
sampleRate: 0.5,
rateLimit: 1000,
- samplingRules: [
- { service: 'usersvc', name: 'healthcheck', sampleRate: 0.0 },
- { service: 'usersvc', sampleRate: 0.5 },
- { service: 'authsvc', sampleRate: 1.0 },
- { sampleRate: 0.1 }
- ],
+ samplingRules,
spanSamplingRules: [
{ service: 'mysql', name: 'mysql.query', sampleRate: 0.0, maxPerSecond: 1 },
{ service: 'mysql', sampleRate: 0.5 },
@@ -824,6 +849,7 @@ describe('Config', () => {
{ name: 'runtimeMetrics', value: true, origin: 'code' },
{ name: 'sampleRate', value: 0.5, origin: 'code' },
{ name: 'sampler.rateLimit', value: 1000, origin: 'code' },
+ { name: 'sampler.rules', value: samplingRules, origin: 'code' },
{ name: 'service', value: 'service', origin: 'code' },
{ name: 'site', value: 'datadoghq.eu', origin: 'code' },
{ name: 'spanAttributeSchema', value: 'v1', origin: 'code' },
@@ -985,6 +1011,7 @@ describe('Config', () => {
process.env.DD_TRACE_EXPERIMENTAL_GET_RUM_DATA_ENABLED = 'true'
process.env.DD_TRACE_EXPERIMENTAL_INTERNAL_ERRORS_ENABLED = 'true'
process.env.DD_APPSEC_ENABLED = 'false'
+ process.env.DD_APPSEC_RASP_ENABLED = 'true'
process.env.DD_APPSEC_RULES = RECOMMENDED_JSON_PATH
process.env.DD_APPSEC_TRACE_RATE_LIMIT = 11
process.env.DD_APPSEC_WAF_TIMEOUT = 11
@@ -1064,6 +1091,9 @@ describe('Config', () => {
apiSecurity: {
enabled: true,
requestSampling: 1.0
+ },
+ rasp: {
+ enabled: false
}
},
remoteConfig: {
@@ -1103,6 +1133,7 @@ describe('Config', () => {
expect(config).to.have.nested.property('experimental.exporter', 'agent')
expect(config).to.have.nested.property('experimental.enableGetRumData', false)
expect(config).to.have.nested.property('appsec.enabled', true)
+ expect(config).to.have.nested.property('appsec.rasp.enabled', false)
expect(config).to.have.nested.property('appsec.rules', RULES_JSON_PATH)
expect(config).to.have.nested.property('appsec.rateLimit', 42)
expect(config).to.have.nested.property('appsec.wafTimeout', 42)
@@ -1188,6 +1219,9 @@ describe('Config', () => {
},
sca: {
enabled: null
+ },
+ rasp: {
+ enabled: false
}
})
})
@@ -1441,6 +1475,27 @@ describe('Config', () => {
])
})
+ it('should remove tags from sampling rules when set through remote configuration', () => {
+ const config = new Config()
+
+ config.configure({
+ trace_sample_rules: [
+ {
+ resource: '*',
+ tags: [{ key: 'tag-a', value_glob: 'tag-a-val*' }],
+ provenance: 'customer'
+ }
+ ]
+ }, true)
+
+ expect(config).to.have.deep.nested.property('sampler', {
+ spanSamplingRules: [],
+ rateLimit: undefined,
+ rules: [{ resource: '*', provenance: 'customer' }],
+ sampleRate: undefined
+ })
+ })
+
it('should have consistent runtime-id after remote configuration updates tags', () => {
const config = new Config()
const runtimeId = config.tags['runtime-id']
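
The new remote-configuration test encodes one rule-normalization detail: tags entries are stripped from incoming trace_sample_rules before they land on sampler.rules, while other fields such as provenance pass through. An illustrative one-liner for that transform (the name is assumed):

const normalizeRemoteRules = rules => rules.map(({ tags, ...rule }) => rule)

// => [{ resource: '*', provenance: 'customer' }]
console.log(normalizeRemoteRules([
  { resource: '*', tags: [{ key: 'tag-a', value_glob: 'tag-a-val*' }], provenance: 'customer' }
]))
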
diff --git a/packages/dd-trace/test/encode/0.4.spec.js b/packages/dd-trace/test/encode/0.4.spec.js
index cd20a318e4d..13d20250109 100644
--- a/packages/dd-trace/test/encode/0.4.spec.js
+++ b/packages/dd-trace/test/encode/0.4.spec.js
@@ -249,23 +249,13 @@ describe('encode', () => {
const decoded = msgpack.decode(buffer, { codec })
const trace = decoded[0]
- expect(trace[0].meta_struct).to.deep.equal(metaStruct)
- })
-
- it('should encode meta_struct with simple array of simple values', () => {
- const metaStruct = ['one', 2, 'three', 4, 5, 'six']
- data[0].meta_struct = metaStruct
- encoder.encode(data)
- const buffer = encoder.makePayload()
-
- const decoded = msgpack.decode(buffer, { codec })
- const trace = decoded[0]
- expect(trace[0].meta_struct).to.deep.equal(metaStruct)
+ expect(msgpack.decode(trace[0].meta_struct.foo)).to.be.equal(metaStruct.foo)
+ expect(msgpack.decode(trace[0].meta_struct.baz)).to.be.equal(metaStruct.baz)
})
- it('should encode meta_struct with array of objects', () => {
- const metaStruct = [{ foo: 'bar' }, { baz: 123 }]
+ it('should ignore array in meta_struct', () => {
+ const metaStruct = ['one', 2, 'three', 4, 5, 'six']
data[0].meta_struct = metaStruct
encoder.encode(data)
@@ -273,7 +263,7 @@ describe('encode', () => {
const decoded = msgpack.decode(buffer, { codec })
const trace = decoded[0]
- expect(trace[0].meta_struct).to.deep.equal(metaStruct)
+ expect(trace[0].meta_struct).to.deep.equal({})
})
it('should encode meta_struct with empty object and array', () => {
@@ -288,7 +278,8 @@ describe('encode', () => {
const decoded = msgpack.decode(buffer, { codec })
const trace = decoded[0]
- expect(trace[0].meta_struct).to.deep.equal(metaStruct)
+ expect(msgpack.decode(trace[0].meta_struct.foo)).to.deep.equal(metaStruct.foo)
+ expect(msgpack.decode(trace[0].meta_struct.bar)).to.deep.equal(metaStruct.bar)
})
it('should encode meta_struct with possible real use case', () => {
@@ -342,7 +333,7 @@ describe('encode', () => {
const decoded = msgpack.decode(buffer, { codec })
const trace = decoded[0]
- expect(trace[0].meta_struct).to.deep.equal(metaStruct)
+ expect(msgpack.decode(trace[0].meta_struct['_dd.stack'])).to.deep.equal(metaStruct['_dd.stack'])
})
it('should encode meta_struct ignoring circular references in objects', () => {
@@ -373,7 +364,7 @@ describe('encode', () => {
}
}
}
- expect(trace[0].meta_struct).to.deep.equal(expectedMetaStruct)
+ expect(msgpack.decode(trace[0].meta_struct.foo)).to.deep.equal(expectedMetaStruct.foo)
})
it('should encode meta_struct ignoring circular references in arrays', () => {
@@ -398,7 +389,7 @@ describe('encode', () => {
bar: 'baz'
}]
}
- expect(trace[0].meta_struct).to.deep.equal(expectedMetaStruct)
+ expect(msgpack.decode(trace[0].meta_struct.foo)).to.deep.equal(expectedMetaStruct.foo)
})
it('should encode meta_struct ignoring undefined properties', () => {
@@ -418,7 +409,8 @@ describe('encode', () => {
const expectedMetaStruct = {
foo: 'bar'
}
- expect(trace[0].meta_struct).to.deep.equal(expectedMetaStruct)
+ expect(msgpack.decode(trace[0].meta_struct.foo)).to.deep.equal(expectedMetaStruct.foo)
+ expect(trace[0].meta_struct.undefinedProperty).to.be.undefined
})
it('should encode meta_struct ignoring null properties', () => {
@@ -438,7 +430,8 @@ describe('encode', () => {
const expectedMetaStruct = {
foo: 'bar'
}
- expect(trace[0].meta_struct).to.deep.equal(expectedMetaStruct)
+ expect(msgpack.decode(trace[0].meta_struct.foo)).to.deep.equal(expectedMetaStruct.foo)
+ expect(trace[0].meta_struct.nullProperty).to.be.undefined
})
it('should not encode null meta_struct', () => {
diff --git a/packages/dd-trace/test/flare.spec.js b/packages/dd-trace/test/flare.spec.js
new file mode 100644
index 00000000000..ac4133cd9e9
--- /dev/null
+++ b/packages/dd-trace/test/flare.spec.js
@@ -0,0 +1,161 @@
+'use strict'
+
+const Config = require('../src/config')
+const { channel } = require('dc-polyfill')
+const express = require('express')
+const getPort = require('get-port')
+const http = require('http')
+const upload = require('multer')()
+const proxyquire = require('proxyquire').noCallThru()
+
+require('./setup/tap')
+
+const debugChannel = channel('datadog:log:debug')
+
+describe('Flare', () => {
+ let flare
+ let startupLog
+ let tracerConfig
+ let task
+ let port
+ let server
+ let listener
+ let socket
+ let handler
+
+ const createServer = () => {
+ const app = express()
+
+ app.post('/tracer_flare/v1', upload.any(), (req, res) => {
+ res.sendStatus(200)
+ handler(req)
+ })
+
+ server = http.createServer(app)
+ server.on('connection', socket_ => {
+ socket = socket_
+ })
+
+ listener = server.listen(port)
+ }
+
+ beforeEach(() => {
+ startupLog = {
+ tracerInfo: () => ({
+ lang: 'nodejs'
+ })
+ }
+
+ flare = proxyquire('../src/flare', {
+ '../startup-log': startupLog
+ })
+
+ return getPort().then(port_ => {
+ port = port_
+ })
+ })
+
+ beforeEach(() => {
+ tracerConfig = new Config({
+ url: `http://127.0.0.1:${port}`
+ })
+
+ task = {
+ case_id: '111',
+ hostname: 'myhostname',
+ user_handle: 'user.name@datadoghq.com'
+ }
+
+ createServer()
+ })
+
+ afterEach(done => {
+ handler = null
+ flare.disable()
+ listener.close()
+ socket && socket.end()
+ server.on('close', () => {
+ server = null
+ listener = null
+ socket = null
+
+ done()
+ })
+ })
+
+ it('should send a flare', done => {
+ handler = req => {
+ try {
+ expect(req.body).to.include({
+ case_id: task.case_id,
+ hostname: task.hostname,
+ email: task.user_handle,
+ source: 'tracer_nodejs'
+ })
+
+ done()
+ } catch (e) {
+ done(e)
+ }
+ }
+
+ flare.enable(tracerConfig)
+ flare.send(task)
+ })
+
+ it('should send the tracer info', done => {
+ handler = req => {
+ try {
+ expect(req.files).to.have.length(1)
+ expect(req.files[0]).to.include({
+ fieldname: 'flare_file',
+ originalname: 'tracer_info.txt',
+ mimetype: 'application/octet-stream'
+ })
+
+ const content = JSON.parse(req.files[0].buffer.toString())
+
+ expect(content).to.have.property('lang', 'nodejs')
+
+ done()
+ } catch (e) {
+ done(e)
+ }
+ }
+
+ flare.enable(tracerConfig)
+ flare.send(task)
+ })
+
+ it('should send the tracer logs', done => {
+ handler = req => {
+ try {
+ const file = req.files[0]
+
+ if (file.originalname !== 'tracer_logs.txt') return
+
+ expect(file).to.include({
+ fieldname: 'flare_file',
+ originalname: 'tracer_logs.txt',
+ mimetype: 'application/octet-stream'
+ })
+
+ const content = file.buffer.toString()
+
+ expect(content).to.equal('foo\nbar\n')
+
+ done()
+ } catch (e) {
+ done(e)
+ }
+ }
+
+ flare.enable(tracerConfig)
+ flare.prepare('debug')
+
+ debugChannel.publish('foo')
+ debugChannel.publish('bar')
+
+ flare.send(task)
+ })
+})
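
flare.spec.js fixes the wire format of a tracer flare: a multipart POST to /tracer_flare/v1 carrying the task metadata as form fields and each artifact as a flare_file part. A hedged sketch of how such a payload could be assembled, using the form-data package purely for illustration (the actual encoder used by src/flare is not shown in this patch):

const FormData = require('form-data')

function buildFlareForm (task, tracerInfo, logText) {
  const form = new FormData()
  form.append('case_id', task.case_id)
  form.append('hostname', task.hostname)
  form.append('email', task.user_handle)
  form.append('source', 'tracer_nodejs')
  form.append('flare_file', Buffer.from(JSON.stringify(tracerInfo)), 'tracer_info.txt')
  if (logText) form.append('flare_file', Buffer.from(logText), 'tracer_logs.txt')
  return form
}
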
diff --git a/packages/dd-trace/test/opentracing/propagation/text_map.spec.js b/packages/dd-trace/test/opentracing/propagation/text_map.spec.js
index cedf6a439a7..9bc86bc16ff 100644
--- a/packages/dd-trace/test/opentracing/propagation/text_map.spec.js
+++ b/packages/dd-trace/test/opentracing/propagation/text_map.spec.js
@@ -523,6 +523,34 @@ describe('TextMapPropagator', () => {
expect(spanContext._tracestate).to.be.undefined
})
+ it('extracts span_id from tracecontext headers and stores datadog parent-id in trace_distributed_tags', () => {
+ textMap['x-datadog-trace-id'] = '61185'
+ textMap['x-datadog-parent-id'] = '15'
+ textMap.traceparent = '00-0000000000000000000000000000ef01-0000000000011ef0-01'
+ config.tracePropagationStyle.extract = ['datadog', 'tracecontext']
+
+ const carrier = textMap
+ const spanContext = propagator.extract(carrier)
+ expect(parseInt(spanContext._spanId.toString(), 16)).to.equal(73456)
+ expect(parseInt(spanContext._traceId.toString(), 16)).to.equal(61185)
+ expect(spanContext._trace.tags).to.have.property('_dd.parent_id', '000000000000000f')
+ })
+
+ it('extracts span_id from tracecontext headers and stores p value from tracestate in trace_distributed_tags',
+ () => {
+ textMap['x-datadog-trace-id'] = '61185'
+ textMap['x-datadog-parent-id'] = '15'
+ textMap.traceparent = '00-0000000000000000000000000000ef01-0000000000011ef0-01'
+ textMap.tracestate = 'other=bleh,dd=p:0000000000000001;s:2;o:foo;t.dm:-4'
+ config.tracePropagationStyle.extract = ['datadog', 'tracecontext']
+
+ const carrier = textMap
+ const spanContext = propagator.extract(carrier)
+ expect(parseInt(spanContext._spanId.toString(), 16)).to.equal(73456)
+ expect(parseInt(spanContext._traceId.toString(), 16)).to.equal(61185)
+ expect(spanContext._trace.tags).to.have.property('_dd.parent_id', '0000000000000001')
+ })
+
describe('with B3 propagation as multiple headers', () => {
beforeEach(() => {
config.tracePropagationStyle.extract = ['b3multi']
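
Both new extraction tests key off a 16-hex-character parent id surfaced as the _dd.parent_id trace tag: the zero-padded x-datadog-parent-id when the tracestate carries no p member, and the p member of the dd= tracestate entry otherwise. An illustrative parse of that p member, simplified relative to the propagator:

function ddParentIdFromTracestate (tracestate) {
  const dd = tracestate.split(',').find(part => part.startsWith('dd='))
  if (!dd) return
  const p = dd.slice(3).split(';').find(kv => kv.startsWith('p:'))
  return p && p.slice(2)
}

// => '0000000000000001'
console.log(ddParentIdFromTracestate('other=bleh,dd=p:0000000000000001;s:2;o:foo;t.dm:-4'))
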
diff --git a/packages/dd-trace/test/profiling/ssi-telemetry.spec.js b/packages/dd-trace/test/profiling/ssi-heuristics.spec.js
similarity index 65%
rename from packages/dd-trace/test/profiling/ssi-telemetry.spec.js
rename to packages/dd-trace/test/profiling/ssi-heuristics.spec.js
index 5e8d7df9d62..be46d3714f5 100644
--- a/packages/dd-trace/test/profiling/ssi-telemetry.spec.js
+++ b/packages/dd-trace/test/profiling/ssi-heuristics.spec.js
@@ -9,30 +9,31 @@ const telemetryManagerNamespace = sinon.stub()
telemetryManagerNamespace.returns()
const dc = require('dc-polyfill')
+const Config = require('../../src/config')
-describe('SSI Telemetry', () => {
+describe('SSI Heuristics', () => {
it('should be disabled without SSI even if the profiler is manually enabled', () => {
delete process.env.DD_INJECTION_ENABLED
process.env.DD_PROFILING_ENABLED = 'true'
- testDisabledTelemetry()
+ testDisabledHeuristics()
})
it('should be disabled when SSI is present but the profiler is manually disabled', () => {
process.env.DD_INJECTION_ENABLED = 'tracing'
process.env.DD_PROFILING_ENABLED = 'false'
- testDisabledTelemetry()
+ testDisabledHeuristics()
})
it('should be enabled when SSI is present', () => {
process.env.DD_INJECTION_ENABLED = 'tracing'
delete process.env.DD_PROFILING_ENABLED
- testEnabledTelemetry('not_enabled')
+ return testEnabledHeuristics('not_enabled')
})
it('should be enabled when SSI is present and profiling is manually enabled', () => {
process.env.DD_INJECTION_ENABLED = 'tracing'
process.env.DD_PROFILING_ENABLED = 'true'
- testEnabledTelemetry('manually_enabled')
+ return testEnabledHeuristics('manually_enabled')
})
})
@@ -54,7 +55,7 @@ function setupHarness () {
}
const namespaceFn = sinon.stub().returns(ssiMetricsNamespace)
- const { SSITelemetry, EnablementChoice } = proxyquire('../src/profiling/ssi-telemetry', {
+ const { SSIHeuristics, EnablementChoice } = proxyquire('../src/profiling/ssi-heuristics', {
'../telemetry/metrics': {
manager: {
namespace: namespaceFn
@@ -67,24 +68,24 @@ function setupHarness () {
runtimeIdCountInc: runtimeIdCount.inc,
count: ssiMetricsNamespace.count
}
- return { stubs, SSITelemetry, EnablementChoice }
+ return { stubs, SSIHeuristics, EnablementChoice }
}
-function testDisabledTelemetry () {
- const { stubs, SSITelemetry, EnablementChoice } = setupHarness()
- const telemetry = new SSITelemetry()
- telemetry.start()
+function testDisabledHeuristics () {
+ const { stubs, SSIHeuristics, EnablementChoice } = setupHarness()
+ const heuristics = new SSIHeuristics(new Config().profiling)
+ heuristics.start()
dc.channel('dd-trace:span:start').publish()
dc.channel('datadog:profiling:profile-submitted').publish()
dc.channel('datadog:profiling:mock-profile-submitted').publish()
dc.channel('datadog:telemetry:app-closing').publish()
- expect(telemetry.enablementChoice).to.equal(EnablementChoice.DISABLED)
- expect(telemetry.enabled()).to.equal(false)
+ expect(heuristics.enablementChoice).to.equal(EnablementChoice.DISABLED)
+ expect(heuristics.enabled()).to.equal(false)
// When it is disabled, the telemetry should not subscribe to any channel
// so the preceding publishes should not have any effect.
- expect(telemetry._profileCount).to.equal(undefined)
- expect(telemetry.hasSentProfiles).to.equal(false)
- expect(telemetry.noSpan).to.equal(true)
+ expect(heuristics._profileCount).to.equal(undefined)
+ expect(heuristics.hasSentProfiles).to.equal(false)
+ expect(heuristics.noSpan).to.equal(true)
expect(stubs.count.notCalled).to.equal(true)
}
@@ -96,16 +97,25 @@ function executeTelemetryEnabledScenario (
heuristicDecision,
longLived = false
) {
- const { stubs, SSITelemetry } = setupHarness()
- const telemetry = longLived ? new SSITelemetry({ shortLivedThreshold: 2 }) : new SSITelemetry()
- telemetry.start()
- expect(telemetry.enabled()).to.equal(true)
+ const { stubs, SSIHeuristics } = setupHarness()
+ const config = new Config()
if (longLived) {
- for (const now = new Date().getTime(); new Date().getTime() - now < 3;);
+ config.profiling.longLivedThreshold = 2
}
- scenario(telemetry)
+ const heuristics = new SSIHeuristics(config.profiling)
+ heuristics.start()
+ expect(heuristics.enabled()).to.equal(true)
- createAndCheckMetrics(stubs, profileCount, sentProfiles, enablementChoice, heuristicDecision)
+ function runScenarioAndCheck () {
+ scenario(heuristics)
+ createAndCheckMetrics(stubs, profileCount, sentProfiles, enablementChoice, heuristicDecision)
+ }
+
+ if (longLived) {
+ return new Promise(resolve => setTimeout(resolve, 3)).then(runScenarioAndCheck)
+ } else {
+ runScenarioAndCheck()
+ }
}
function createAndCheckMetrics (stubs, profileCount, sentProfiles, enablementChoice, heuristicDecision) {
@@ -124,13 +134,12 @@ function createAndCheckMetrics (stubs, profileCount, sentProfiles, enablementChoice, heuristicDecision) {
expect(stubs.runtimeIdCountInc.args.length).to.equal(1)
}
-function testEnabledTelemetry (enablementChoice) {
+function testEnabledHeuristics (enablementChoice) {
testNoOp(enablementChoice)
testProfilesSent(enablementChoice)
testMockProfilesSent(enablementChoice)
testSpan(enablementChoice)
- testLongLived(enablementChoice)
- testTriggered(enablementChoice)
+ return testLongLived(enablementChoice).then(() => testTriggered(enablementChoice))
}
function testNoOp (enablementChoice) {
@@ -152,23 +161,37 @@ function testMockProfilesSent (enablementChoice) {
}
function testSpan (enablementChoice) {
- executeTelemetryEnabledScenario(telemetry => {
+ executeTelemetryEnabledScenario(heuristics => {
dc.channel('dd-trace:span:start').publish()
- expect(telemetry.noSpan).to.equal(false)
+ expect(heuristics.noSpan).to.equal(false)
dc.channel('datadog:profiling:profile-submitted').publish()
}, 1, true, enablementChoice, 'short_lived')
}
function testLongLived (enablementChoice) {
- executeTelemetryEnabledScenario(_ => {
+ let callbackInvoked = false
+ return executeTelemetryEnabledScenario(heuristics => {
+ heuristics.onTriggered(() => {
+ callbackInvoked = true
+ heuristics.onTriggered()
+ })
dc.channel('datadog:profiling:profile-submitted').publish()
- }, 1, true, enablementChoice, 'no_span', true)
+ }, 1, true, enablementChoice, 'no_span', true).then(() => {
+ expect(callbackInvoked).to.equal(false)
+ })
}
function testTriggered (enablementChoice) {
- executeTelemetryEnabledScenario(telemetry => {
+ let callbackInvoked = false
+ return executeTelemetryEnabledScenario(heuristics => {
+ heuristics.onTriggered(() => {
+ callbackInvoked = true
+ heuristics.onTriggered()
+ })
dc.channel('dd-trace:span:start').publish()
- expect(telemetry.noSpan).to.equal(false)
+ expect(heuristics.noSpan).to.equal(false)
dc.channel('datadog:profiling:profile-submitted').publish()
- }, 1, true, enablementChoice, 'triggered', true)
+ }, 1, true, enablementChoice, 'triggered', true).then(() => {
+ expect(callbackInvoked).to.equal(true)
+ })
}
diff --git a/packages/dd-trace/test/proxy.spec.js b/packages/dd-trace/test/proxy.spec.js
index 3bfef7474b3..07fcd41eca6 100644
--- a/packages/dd-trace/test/proxy.spec.js
+++ b/packages/dd-trace/test/proxy.spec.js
@@ -26,6 +26,7 @@ describe('TracerProxy', () => {
let iast
let PluginManager
let pluginManager
+ let flare
let remoteConfig
let rc
let dogStatsD
@@ -156,6 +157,14 @@ describe('TracerProxy', () => {
disable: sinon.spy()
}
+ flare = {
+ enable: sinon.spy(),
+ disable: sinon.spy(),
+ prepare: sinon.spy(),
+ send: sinon.spy(),
+ cleanup: sinon.spy()
+ }
+
remoteConfig = {
enable: sinon.stub()
}
@@ -184,7 +193,8 @@ describe('TracerProxy', () => {
'./appsec/remote_config': remoteConfig,
'./appsec/sdk': AppsecSdk,
'./dogstatsd': dogStatsD,
- './noop/dogstatsd': NoopDogStatsDClient
+ './noop/dogstatsd': NoopDogStatsDClient,
+ './flare': flare
})
proxy = new Proxy()
@@ -249,6 +259,57 @@ describe('TracerProxy', () => {
expect(pluginManager.configure).to.have.been.calledWith(config)
})
+ it('should support enabling debug logs for tracer flares', () => {
+ const logLevel = 'debug'
+
+ proxy.init()
+
+ rc.emit('AGENT_CONFIG', 'apply', {
+ config: {
+ log_level: logLevel
+ },
+ name: 'flare-log-level.debug'
+ })
+
+ expect(flare.enable).to.have.been.calledWith(config)
+ expect(flare.prepare).to.have.been.calledWith(logLevel)
+ })
+
+ it('should support sending tracer flares', () => {
+ const task = {
+ case_id: '111',
+ hostname: 'myhostname',
+ user_handle: 'user.name@datadoghq.com'
+ }
+
+ proxy.init()
+
+ rc.emit('AGENT_TASK', 'apply', {
+ args: task,
+ task_type: 'tracer_flare',
+ uuid: 'd53fc8a4-8820-47a2-aa7d-d565582feb81'
+ })
+
+ expect(flare.enable).to.have.been.calledWith(config)
+ expect(flare.send).to.have.been.calledWith(task)
+ })
+
+ it('should cleanup flares when the config is removed', () => {
+ const conf = {
+ config: {
+ log_level: 'debug'
+ },
+ name: 'flare-log-level.debug'
+ }
+
+ proxy.init()
+
+ rc.emit('AGENT_CONFIG', 'apply', conf)
+ rc.emit('AGENT_CONFIG', 'unapply', conf)
+
+ expect(flare.disable).to.have.been.called
+ })
+
it('should support applying remote config', () => {
const RemoteConfigProxy = proxyquire('../src/proxy', {
'./tracer': DatadogTracer,
diff --git a/packages/dd-trace/test/ritm.spec.js b/packages/dd-trace/test/ritm.spec.js
index e05eeb32a50..df2a4e8b1a4 100644
--- a/packages/dd-trace/test/ritm.spec.js
+++ b/packages/dd-trace/test/ritm.spec.js
@@ -4,37 +4,79 @@ require('./setup/tap')
const dc = require('dc-polyfill')
const { assert } = require('chai')
+const Module = require('module')
const Hook = require('../src/ritm')
-const moduleLoadStartChannel = dc.channel('dd-trace:moduleLoadStart')
-const moduleLoadEndChannel = dc.channel('dd-trace:moduleLoadEnd')
-
describe('Ritm', () => {
- it('should shim util', () => {
- const startListener = sinon.fake()
- const endListener = sinon.fake()
+ let moduleLoadStartChannel, moduleLoadEndChannel, startListener, endListener
+ let utilHook, aHook, bHook, httpHook
+
+ before(() => {
+ moduleLoadStartChannel = dc.channel('dd-trace:moduleLoadStart')
+ moduleLoadEndChannel = dc.channel('dd-trace:moduleLoadEnd')
+ })
+
+ beforeEach(() => {
+ startListener = sinon.fake()
+ endListener = sinon.fake()
moduleLoadStartChannel.subscribe(startListener)
moduleLoadEndChannel.subscribe(endListener)
- Hook('util')
- require('util')
+ Module.prototype.require = new Proxy(Module.prototype.require, {
+ apply (target, thisArg, argArray) {
+ if (argArray[0] === '@azure/functions-core') {
+ return {
+ version: '1.0.0',
+ registerHook: () => { }
+ }
+ } else {
+ return Reflect.apply(target, thisArg, argArray)
+ }
+ }
+ })
+
+ utilHook = Hook('util')
+ aHook = Hook('module-a')
+ bHook = Hook('module-b')
+ httpHook = new Hook(['http'], function onRequire (exports, name, basedir) {
+ exports.foo = 1
+ return exports
+ })
+ })
+
+ afterEach(() => {
+ utilHook.unhook()
+ aHook.unhook()
+ bHook.unhook()
+ httpHook.unhook()
+ })
+
+ it('should shim util', () => {
+ require('util')
assert.equal(startListener.callCount, 1)
assert.equal(endListener.callCount, 1)
})
it('should handle module load cycles', () => {
- const startListener = sinon.fake()
- const endListener = sinon.fake()
-
- moduleLoadStartChannel.subscribe(startListener)
- moduleLoadEndChannel.subscribe(endListener)
- Hook('module-a')
- Hook('module-b')
const { a } = require('./ritm-tests/module-a')
-
assert.equal(startListener.callCount, 2)
assert.equal(endListener.callCount, 2)
assert.equal(a(), 'Called by AJ')
})
+
+ it('should fall back to monkey patched module', () => {
+ assert.equal(require('http').foo, 1, 'normal hooking still works')
+
+ const fnCore = require('@azure/functions-core')
+ assert.ok(fnCore, 'requiring monkey patched in module works')
+ assert.equal(fnCore.version, '1.0.0')
+ assert.equal(typeof fnCore.registerHook, 'function')
+
+ assert.throws(
+ () => require('package-does-not-exist'),
+ 'Cannot find module \'package-does-not-exist\'',
+ 'a failing `require(...)` can still throw as expected'
+ )
+ })
})
diff --git a/packages/dd-trace/test/setup/core.js b/packages/dd-trace/test/setup/core.js
index f7d32157a99..ab1e6531089 100644
--- a/packages/dd-trace/test/setup/core.js
+++ b/packages/dd-trace/test/setup/core.js
@@ -5,6 +5,29 @@ const chai = require('chai')
const sinonChai = require('sinon-chai')
const proxyquire = require('../proxyquire')
+{
+ // get-port can often return a port that is already in use, thanks to a race
+ // condition. This patch adds a retry for 10 iterations, which should be
+ // enough to avoid flaky tests. The patch is added here in the require cache
+ // because it's used in all sorts of places.
+ const getPort = require('get-port')
+ require.cache[require.resolve('get-port')].exports = async function (...args) {
+ let tries = 0
+ let err = null
+ while (tries++ < 10) {
+ try {
+ return await getPort(...args)
+ } catch (e) {
+ if (e.code !== 'EADDRINUSE') {
+ throw e
+ }
+ err = e
+ }
+ }
+ throw err
+ }
+}
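
Callers do not change: any require('get-port') after this setup file runs resolves to the retrying wrapper, because the patched export is served from the require cache. For example:

const getPort = require('get-port') // resolves to the wrapper installed above

getPort().then(port => {
  // up to 10 attempts were made before settling on this port
})
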
+
chai.use(sinonChai)
chai.use(require('../asserts/profile'))
diff --git a/packages/dd-trace/test/setup/mocha.js b/packages/dd-trace/test/setup/mocha.js
index 5a770ec8a1a..15131c2946d 100644
--- a/packages/dd-trace/test/setup/mocha.js
+++ b/packages/dd-trace/test/setup/mocha.js
@@ -26,7 +26,11 @@ function loadInst (plugin) {
loadInstFile(`${plugin}/server.js`, instrumentations)
loadInstFile(`${plugin}/client.js`, instrumentations)
} catch (e) {
- loadInstFile(`${plugin}.js`, instrumentations)
+ try {
+ loadInstFile(`${plugin}/main.js`, instrumentations)
+ } catch (e) {
+ loadInstFile(`${plugin}.js`, instrumentations)
+ }
}
return instrumentations
@@ -143,10 +147,12 @@ function withNamingSchema (
function withPeerService (tracer, pluginName, spanGenerationFn, service, serviceSource, opts = {}) {
describe('peer service computation' + (opts.desc ? ` ${opts.desc}` : ''), () => {
let computePeerServiceSpy
+
beforeEach(() => {
const plugin = tracer()._pluginManager._pluginsByName[pluginName]
computePeerServiceSpy = sinon.stub(plugin._tracerConfig, 'spanComputePeerService').value(true)
})
+
afterEach(() => {
computePeerServiceSpy.restore()
})
diff --git a/packages/dd-trace/test/telemetry/index.spec.js b/packages/dd-trace/test/telemetry/index.spec.js
index 75d0584b343..0acaa7f883a 100644
--- a/packages/dd-trace/test/telemetry/index.spec.js
+++ b/packages/dd-trace/test/telemetry/index.spec.js
@@ -386,6 +386,36 @@ describe('Telemetry extended heartbeat', () => {
telemetry.updateConfig(changeNeedingNameRemapping, config)
clock.tick(86400000)
expect(configuration).to.deep.equal(expectedConfigList)
+
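+ // entries named 'sampler.rules' are expected to be reported as DD_TRACE_SAMPLING_RULES, with the rule array serialized to a JSON string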
+ const samplingRule = [
+ {
+ name: 'sampler.rules', // one of the config names that require a remapping
+ value: [
+ { service: '*', sampling_rate: 1 },
+ {
+ service: 'svc*',
+ resource: '*abc',
+ name: 'op-??',
+ tags: { 'tag-a': 'ta-v*', 'tag-b': 'tb-v?', 'tag-c': 'tc-v' },
+ sample_rate: 0.5
+ }
+ ],
+ origin: 'code'
+ }
+ ]
+ const expectedConfigListWithSamplingRules =
+ expectedConfigList.concat([
+ {
+ name: 'DD_TRACE_SAMPLING_RULES',
+ value:
+ // eslint-disable-next-line max-len
+ '[{"service":"*","sampling_rate":1},{"service":"svc*","resource":"*abc","name":"op-??","tags":{"tag-a":"ta-v*","tag-b":"tb-v?","tag-c":"tc-v"},"sample_rate":0.5}]',
+ origin: 'code'
+ }
+ ])
+ telemetry.updateConfig(samplingRule, config)
+ clock.tick(86400000)
+ expect(configuration).to.deep.equal(expectedConfigListWithSamplingRules)
done()
})
})
diff --git a/yarn.lock b/yarn.lock
index 29daf95f359..fcbb9cd6c3f 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -412,10 +412,10 @@
resolved "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz"
integrity "sha1-u1BFecHK6SPmV2pPXaQ9Jfl729k= sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ=="
-"@datadog/native-appsec@7.1.1":
- version "7.1.1"
- resolved "https://registry.yarnpkg.com/@datadog/native-appsec/-/native-appsec-7.1.1.tgz#eee96ae4c309e5b811611e968668f6744452c584"
- integrity sha512-1XVrCY4g1ArN79SQANMtiIkaxKSPfgdAGv0VAM4Pz+NQuxKfl+2xQPXjQPm87LI1KQIO6MU6qzv3sUUSesb9lA==
+"@datadog/native-appsec@8.0.1":
+ version "8.0.1"
+ resolved "https://registry.yarnpkg.com/@datadog/native-appsec/-/native-appsec-8.0.1.tgz#be06be92d79d7462aa64ee3a33108133083134fc"
+ integrity sha512-SpWkoo7K4+pwxFze1ogRF1qBaKm8sZjWfZKnQ8Ex67f6L5odLjWOoiiIAs5rp01sLKGXjxU8IJf+X9j4PvI2zQ==
dependencies:
node-gyp-build "^3.9.0"
@@ -726,10 +726,10 @@
"@nodelib/fs.scandir" "2.1.5"
fastq "^1.6.0"
-"@opentelemetry/api@^1.0.0":
- version "1.4.1"
- resolved "https://registry.npmjs.org/@opentelemetry/api/-/api-1.4.1.tgz"
- integrity "sha1-/yLrLl1Hb7wkUKGW5A3SQ8wgwo8= sha512-O2yRJce1GOc6PAy3QxFM4NzFiWzvScDC1/5ihYBL6BUEVdq0XMWN01sppE+H6bBXbaFYipjwFLEWLg5PaSOThA=="
+"@opentelemetry/api@>=1.0.0 <1.9.0":
+ version "1.8.0"
+ resolved "https://registry.yarnpkg.com/@opentelemetry/api/-/api-1.8.0.tgz#5aa7abb48f23f693068ed2999ae627d2f7d902ec"
+ integrity sha512-I/s6F7yKUDdtMsoBWXJe8Qz40Tui5vsuKCWJEWVL+5q9sSWRzzx6v2KeNsOBEwd94j0eWkpWCH4yB6rZg9Mf0w==
"@opentelemetry/core@^1.14.0":
version "1.14.0"
@@ -2919,11 +2919,6 @@ ipaddr.js@1.9.1:
resolved "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz"
integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==
-ipaddr.js@^2.1.0:
- version "2.1.0"
- resolved "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz"
- integrity sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ==
-
is-arguments@^1.0.4, is-arguments@^1.1.1:
version "1.1.1"
resolved "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz"
@@ -3506,7 +3501,7 @@ merge2@^1.3.0, merge2@^1.4.1:
resolved "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz"
integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==
-methods@^1.1.2, methods@~1.1.2:
+methods@~1.1.2:
version "1.1.2"
resolved "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz"
integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==
@@ -3709,11 +3704,6 @@ nock@^11.3.3:
mkdirp "^0.5.0"
propagate "^2.0.0"
-node-abort-controller@^3.1.1:
- version "3.1.1"
- resolved "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-3.1.1.tgz"
- integrity sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==
-
node-addon-api@^6.1.0:
version "6.1.0"
resolved "https://registry.npmjs.org/node-addon-api/-/node-addon-api-6.1.0.tgz"