diff --git a/.circleci/config.yml b/.circleci/config.yml
index 35827b8042436..003efda4d779f 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -15,7 +15,7 @@ experimental:
templates:
job_template: &job_template
docker:
- - image: gcr.io/datadoghq/agent-circleci-runner:v48262719-bfb00f80
+ - image: gcr.io/datadoghq/agent-circleci-runner:v48372186-ff395e52
environment:
USE_SYSTEM_LIBS: "1"
working_directory: /go/src/github.com/DataDog/datadog-agent
diff --git a/.ddqa/config.toml b/.ddqa/config.toml
index 25324ad07625f..eac6d757e5282 100644
--- a/.ddqa/config.toml
+++ b/.ddqa/config.toml
@@ -215,8 +215,8 @@ github_labels = ["team/container-app"]
jira_project = "INPLAT"
jira_issue_type = "Task"
jira_statuses = ["Selected For Development", "In Progress", "Done"]
-github_team = "apm-onboarding"
-github_labels = ["team/apm-onboarding"]
+github_team = "injection-platform"
+github_labels = ["team/injection-platform"]
[teams."Agent Release Management"]
jira_project = "AGNTR"
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 7b2acde0f3863..cd9cafea971f8 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -44,6 +44,8 @@
/service.datadog.yaml @DataDog/agent-delivery
/static-analysis.datadog.yml @DataDog/software-integrity-and-trust @DataDog/agent-devx-infra
+/modules.yml @DataDog/agent-shared-components
+
/.circleci/ @DataDog/agent-devx-infra
/.github/CODEOWNERS # do not notify anyone
@@ -99,7 +101,7 @@
/.gitlab/package_deps_build/package_deps_build.yml @DataDog/agent-devx-infra @DataDog/ebpf-platform
/.gitlab/powershell_script_signing/powershell_script_signing.yml @DataDog/agent-delivery @DataDog/windows-agent
/.gitlab/source_test/golang_deps_diff.yml @DataDog/agent-devx-infra @DataDog/agent-devx-loops
-/.gitlab/source_test/include.yml @DataDog/agent-devx-infra
+/.gitlab/source_test/* @DataDog/agent-devx-infra
/.gitlab/source_test/linux.yml @DataDog/agent-devx-infra @DataDog/agent-devx-loops
/.gitlab/source_test/macos.yml @DataDog/agent-devx-infra @DataDog/agent-devx-loops
/.gitlab/source_test/notify.yml @DataDog/agent-devx-infra @DataDog/agent-devx-loops
@@ -366,7 +368,7 @@
/pkg/clusteragent/ @DataDog/container-platform
/pkg/clusteragent/autoscaling/ @DataDog/container-integrations
/pkg/clusteragent/admission/mutate/autoscaling @DataDog/container-integrations
-/pkg/clusteragent/admission/mutate/autoinstrumentation/ @DataDog/container-platform @DataDog/apm-onboarding
+/pkg/clusteragent/admission/mutate/autoinstrumentation/ @DataDog/container-platform @DataDog/injection-platform
/pkg/clusteragent/orchestrator/ @DataDog/container-app
/pkg/clusteragent/telemetry/ @DataDog/apm-trace-storage
/pkg/collector/ @DataDog/agent-metrics-logs
@@ -444,6 +446,7 @@
/pkg/util/ecs/ @DataDog/container-integrations
/pkg/util/funcs/ @DataDog/ebpf-platform
/pkg/util/kernel/ @DataDog/ebpf-platform
+/pkg/util/safeelf/ @DataDog/ebpf-platform
/pkg/util/ktime @DataDog/agent-security
/pkg/util/kubernetes/ @DataDog/container-integrations @DataDog/container-platform @DataDog/container-app
/pkg/util/podman/ @DataDog/container-integrations
@@ -565,6 +568,7 @@
/tasks/libs/ciproviders/ @DataDog/agent-devx-infra
/tasks/libs/common/omnibus.py @DataDog/agent-delivery
/tasks/omnibus.py @DataDog/agent-delivery
+/tasks/release.py @DataDog/agent-delivery
/tasks/unit_tests/components_tests.py @DataDog/agent-shared-components
/tasks/unit_tests/omnibus_tests.py @DataDog/agent-delivery
/tasks/unit_tests/testdata/components_src/ @DataDog/agent-shared-components
@@ -611,7 +615,7 @@
/test/new-e2e/tests/otel @DataDog/opentelemetry
/test/new-e2e/tests/process @DataDog/processes
/test/new-e2e/tests/sysprobe-functional @DataDog/windows-kernel-integrations
-/test/new-e2e/tests/security-agent-functional @DataDog/windows-kernel-integrations
+/test/new-e2e/tests/security-agent-functional @DataDog/windows-kernel-integrations @DataDog/agent-security
/test/new-e2e/tests/cws @DataDog/agent-security
/test/new-e2e/tests/agent-metrics-logs @DataDog/agent-metrics-logs
/test/new-e2e/tests/windows @DataDog/windows-agent @DataDog/windows-kernel-integrations
diff --git a/.github/workflows/create_rc_pr.yml b/.github/workflows/create_rc_pr.yml
index 1545dce79b194..ae15581d3cdea 100644
--- a/.github/workflows/create_rc_pr.yml
+++ b/.github/workflows/create_rc_pr.yml
@@ -88,7 +88,11 @@ jobs:
MATRIX: ${{ matrix.value }}
WARNING: ${{ needs.find_release_branches.outputs.warning }}
run: |
- echo "CHANGES=$(inv -e release.check-for-changes -r "$MATRIX" "$WARNING")" >> $GITHUB_OUTPUT
+ if [ -n "$WARNING" ]; then
+ echo "CHANGES=$(inv -e release.check-for-changes -r "$MATRIX" "$WARNING")" >> $GITHUB_OUTPUT
+ else
+ echo "CHANGES=$(inv -e release.check-for-changes -r "$MATRIX")" >> $GITHUB_OUTPUT
+ fi
- name: Create RC PR
if: ${{ steps.check_for_changes.outputs.CHANGES == 'true'}}
diff --git a/.github/workflows/cws-btfhub-sync.yml b/.github/workflows/cws-btfhub-sync.yml
index 639020761ef48..08d0ad720c1da 100644
--- a/.github/workflows/cws-btfhub-sync.yml
+++ b/.github/workflows/cws-btfhub-sync.yml
@@ -83,11 +83,18 @@ jobs:
echo "ARTIFACT_NAME=constants-${{ matrix.cone }}" | tr '/' '-' >> $GITHUB_OUTPUT
- name: Sync constants
+ if: ${{ !inputs.force_refresh }}
env:
ARTIFACT_NAME: ${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
- FORCE_REFRESH: ${{ inputs.force_refresh && '--force-refresh' || '' }}
run: |
- inv -e security-agent.generate-btfhub-constants --archive-path=./dev/dist/archive --output-path=./"$ARTIFACT_NAME".json "$FORCE_REFRESH"
+ inv -e security-agent.generate-btfhub-constants --archive-path=./dev/dist/archive --output-path=./"$ARTIFACT_NAME".json
+
+ - name: Force sync constants
+ if: ${{ inputs.force_refresh }}
+ env:
+ ARTIFACT_NAME: ${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
+ run: |
+ inv -e security-agent.generate-btfhub-constants --archive-path=./dev/dist/archive --output-path=./"$ARTIFACT_NAME".json --force-refresh
- name: Upload artifact
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
@@ -106,7 +113,6 @@ jobs:
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
with:
ref: ${{ inputs.base_branch || 'main' }}
- persist-credentials: false
- name: Install python
uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
@@ -158,8 +164,8 @@ jobs:
title: 'CWS: sync BTFHub constants',
owner,
repo,
- head: "$BRANCH_NAME",
- base: "$BASE_BRANCH",
+ head: process.env.BRANCH_NAME,
+ base: process.env.BASE_BRANCH,
body: [
'### What does this PR do?',
'This PR syncs the BTFHub constants used by CWS',
diff --git a/.github/workflows/go_mod_tidy.yml b/.github/workflows/go_mod_tidy.yml
index 581548a7b4575..e48d806c9dce9 100644
--- a/.github/workflows/go_mod_tidy.yml
+++ b/.github/workflows/go_mod_tidy.yml
@@ -20,7 +20,6 @@ jobs:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
with:
ref: ${{ github.head_ref }}
- persist-credentials: false
- name: Checkout PR
# run only if triggered manually, otherwise we are already on the right branch and we won't have `pr_number`
if: ${{ github.event_name == 'workflow_dispatch' }}
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index ab738a582229a..fab353404816d 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -172,49 +172,49 @@ variables:
# To use images from datadog-agent-buildimages dev branches, set the corresponding
# SUFFIX variable to _test_only
DATADOG_AGENT_BUILDIMAGES_SUFFIX: ""
- DATADOG_AGENT_BUILDIMAGES: v48262719-bfb00f80
+ DATADOG_AGENT_BUILDIMAGES: v48372186-ff395e52
DATADOG_AGENT_WINBUILDIMAGES_SUFFIX: ""
- DATADOG_AGENT_WINBUILDIMAGES: v48262719-bfb00f80
+ DATADOG_AGENT_WINBUILDIMAGES: v48372186-ff395e52
DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX: ""
- DATADOG_AGENT_ARMBUILDIMAGES: v48262719-bfb00f80
+ DATADOG_AGENT_ARMBUILDIMAGES: v48372186-ff395e52
DATADOG_AGENT_SYSPROBE_BUILDIMAGES_SUFFIX: ""
- DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v48262719-bfb00f80
+ DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v48372186-ff395e52
DATADOG_AGENT_BTF_GEN_BUILDIMAGES_SUFFIX: ""
- DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v48262719-bfb00f80
+ DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v48372186-ff395e52
# New images to enable different version per image - not used yet
- CI_IMAGE_BTF_GEN: v48262719-bfb00f80
+ CI_IMAGE_BTF_GEN: v48372186-ff395e52
CI_IMAGE_BTF_GEN_SUFFIX: ""
- CI_IMAGE_DEB_X64: v48262719-bfb00f80
+ CI_IMAGE_DEB_X64: v48372186-ff395e52
CI_IMAGE_DEB_X64_SUFFIX: ""
- CI_IMAGE_DEB_ARM64: v48262719-bfb00f80
+ CI_IMAGE_DEB_ARM64: v48372186-ff395e52
CI_IMAGE_DEB_ARM64_SUFFIX: ""
- CI_IMAGE_DEB_ARMHF: v48262719-bfb00f80
+ CI_IMAGE_DEB_ARMHF: v48372186-ff395e52
CI_IMAGE_DEB_ARMHF_SUFFIX: ""
- CI_IMAGE_DD_AGENT_TESTING: v48262719-bfb00f80
+ CI_IMAGE_DD_AGENT_TESTING: v48372186-ff395e52
CI_IMAGE_DD_AGENT_TESTING_SUFFIX: ""
- CI_IMAGE_DOCKER_X64: v48262719-bfb00f80
+ CI_IMAGE_DOCKER_X64: v48372186-ff395e52
CI_IMAGE_DOCKER_X64_SUFFIX: ""
- CI_IMAGE_DOCKER_ARM64: v48262719-bfb00f80
+ CI_IMAGE_DOCKER_ARM64: v48372186-ff395e52
CI_IMAGE_DOCKER_ARM64_SUFFIX: ""
- CI_IMAGE_GITLAB_AGENT_DEPLOY: v48262719-bfb00f80
+ CI_IMAGE_GITLAB_AGENT_DEPLOY: v48372186-ff395e52
CI_IMAGE_GITLAB_AGENT_DEPLOY_SUFFIX: ""
- CI_IMAGE_LINUX_GLIBC_2_17_X64: v48262719-bfb00f80
+ CI_IMAGE_LINUX_GLIBC_2_17_X64: v48372186-ff395e52
CI_IMAGE_LINUX_GLIBC_2_17_X64_SUFFIX: ""
- CI_IMAGE_LINUX_GLIBC_2_23_ARM64: v48262719-bfb00f80
+ CI_IMAGE_LINUX_GLIBC_2_23_ARM64: v48372186-ff395e52
CI_IMAGE_LINUX_GLIBC_2_23_ARM64_SUFFIX: ""
- CI_IMAGE_SYSTEM_PROBE_X64: v48262719-bfb00f80
+ CI_IMAGE_SYSTEM_PROBE_X64: v48372186-ff395e52
CI_IMAGE_SYSTEM_PROBE_X64_SUFFIX: ""
- CI_IMAGE_SYSTEM_PROBE_ARM64: v48262719-bfb00f80
+ CI_IMAGE_SYSTEM_PROBE_ARM64: v48372186-ff395e52
CI_IMAGE_SYSTEM_PROBE_ARM64_SUFFIX: ""
- CI_IMAGE_RPM_X64: v48262719-bfb00f80
+ CI_IMAGE_RPM_X64: v48372186-ff395e52
CI_IMAGE_RPM_X64_SUFFIX: ""
- CI_IMAGE_RPM_ARM64: v48262719-bfb00f80
+ CI_IMAGE_RPM_ARM64: v48372186-ff395e52
CI_IMAGE_RPM_ARM64_SUFFIX: ""
- CI_IMAGE_RPM_ARMHF: v48262719-bfb00f80
+ CI_IMAGE_RPM_ARMHF: v48372186-ff395e52
CI_IMAGE_RPM_ARMHF_SUFFIX: ""
- CI_IMAGE_WIN_1809_X64: v48262719-bfb00f80
+ CI_IMAGE_WIN_1809_X64: v48372186-ff395e52
CI_IMAGE_WIN_1809_X64_SUFFIX: ""
- CI_IMAGE_WIN_LTSC2022_X64: v48262719-bfb00f80
+ CI_IMAGE_WIN_LTSC2022_X64: v48372186-ff395e52
CI_IMAGE_WIN_LTSC2022_X64_SUFFIX: ""
DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded
@@ -746,6 +746,7 @@ workflow:
- .gitlab/kernel_matrix_testing/security_agent.yml
- .gitlab/kernel_matrix_testing/common.yml
- .gitlab/source_test/ebpf.yml
+ - test/new-e2e/tests/cws/**/*
- test/new-e2e/system-probe/**/*
- test/new-e2e/scenarios/system-probe/**/*
- test/new-e2e/pkg/runner/**/*
@@ -965,9 +966,7 @@ workflow:
.on_cws_or_e2e_changes:
- !reference [.on_e2e_main_release_or_rc]
- changes:
- paths:
- # TODO: Add paths that should trigger tests for CWS
- - test/new-e2e/tests/cws/**/*
+ paths: *security_agent_change_paths
compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
.on_process_or_e2e_changes:
diff --git a/.gitlab/binary_build/system_probe.yml b/.gitlab/binary_build/system_probe.yml
index a12defc7cdb53..5d897fb7f5f3e 100644
--- a/.gitlab/binary_build/system_probe.yml
+++ b/.gitlab/binary_build/system_probe.yml
@@ -12,7 +12,7 @@
- find "$CI_BUILDS_DIR" ! -path '*DataDog/datadog-agent*' -delete || true # Allow failure, we can't remove parent folders of datadog-agent
script:
- inv check-go-version
- - inv -e system-probe.build --strip-object-files --no-bundle
+ - inv -e system-probe.build --strip-object-files
# fail if references to glibc >= 2.18
- objdump -p $CI_PROJECT_DIR/$SYSTEM_PROBE_BINARIES_DIR/system-probe | egrep 'GLIBC_2\.(1[8-9]|[2-9][0-9])' && exit 1
- inv -e system-probe.save-build-outputs $CI_PROJECT_DIR/sysprobe-build-outputs.tar.xz
diff --git a/.gitlab/common/macos.yml b/.gitlab/common/macos.yml
index 59b89f44aec66..5dcf60ab2e130 100644
--- a/.gitlab/common/macos.yml
+++ b/.gitlab/common/macos.yml
@@ -33,6 +33,11 @@
fi
- pyenv activate $VENV_NAME
+.vault_login:
+ # Point the CLI to our internal vault
+ - export VAULT_ADDR=https://vault.us1.ddbuild.io
+ - vault login -method=aws -no-print
+
.macos_gitlab:
before_script:
# Selecting the current Go version
diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml
index 037fc229a8c02..3bf60ee98c97f 100644
--- a/.gitlab/common/test_infra_version.yml
+++ b/.gitlab/common/test_infra_version.yml
@@ -4,4 +4,4 @@ variables:
# and check the job creating the image to make sure you have the right SHA prefix
TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: ""
# Make sure to update test-infra-definitions version in go.mod as well
- TEST_INFRA_DEFINITIONS_BUILDIMAGES: ec7e1e40abcd
+ TEST_INFRA_DEFINITIONS_BUILDIMAGES: b436617374bf
diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml
index 40a95688fb0d8..d5a5b7a8ce3f6 100644
--- a/.gitlab/e2e/e2e.yml
+++ b/.gitlab/e2e/e2e.yml
@@ -24,9 +24,10 @@
- ARM_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_CLIENT_SECRET) || exit $?; export ARM_CLIENT_SECRET
- ARM_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_TENANT_ID) || exit $?; export ARM_TENANT_ID
- ARM_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_SUBSCRIPTION_ID) || exit $?; export ARM_SUBSCRIPTION_ID
- # Setup GCP credentials. https://www.pulumi.com/registry/packages/gcp/installation-configuration/
+ # Setup GCP credentials. https://www.pulumi.com/registry/packages/gcp/service-account/
# The service account is called `agent-e2e-tests`
- - GOOGLE_CREDENTIALS=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_GCP_CREDENTIALS) || exit $?; export GOOGLE_CREDENTIALS
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_GCP_CREDENTIALS > ~/gcp-credentials.json || exit $?
+ - export GOOGLE_APPLICATION_CREDENTIALS=~/gcp-credentials.json
# Generate external links to CI VISIBILITY, used by artifacts:reports:annotations
- inv -e gitlab.generate-ci-visibility-links --output=$EXTERNAL_LINKS_PATH
variables:
@@ -500,7 +501,7 @@ new-e2e-otel-eks-init:
variables:
TARGETS: ./tests/otel
TEAM: otel
- EXTRA_PARAMS: --run TestOTelAgentIAEKS
+ EXTRA_PARAMS: --run "TestOTelAgentIA(EKS|USTEKS)"
E2E_INIT_ONLY: "true"
SHOULD_RUN_IN_FLAKES_FINDER: "false"
@@ -519,7 +520,7 @@ new-e2e-otel-eks:
- new-e2e-otel-eks-init
variables:
TARGETS: ./tests/otel
- EXTRA_PARAMS: --run TestOTelAgentIAEKS
+ EXTRA_PARAMS: --run "TestOTelAgentIA(EKS|USTEKS)"
TEAM: otel
E2E_PRE_INITIALIZED: "true"
@@ -535,7 +536,7 @@ new-e2e-otel:
- qa_agent_ot
variables:
TARGETS: ./tests/otel
- EXTRA_PARAMS: --skip TestOTelAgentIAEKS
+ EXTRA_PARAMS: --skip "TestOTelAgentIA(EKS|USTEKS)"
TEAM: otel
.new-e2e_package_signing:
@@ -656,4 +657,3 @@ new-e2e-eks-cleanup-on-failure:
- !reference [.except_mergequeue]
- when: always
allow_failure: true
-
diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml
index 4009cf1119e94..07496f6419feb 100644
--- a/.gitlab/functional_test/regression_detector.yml
+++ b/.gitlab/functional_test/regression_detector.yml
@@ -18,20 +18,13 @@ single-machine-performance-regression_detector:
- outputs/regression_signal.json # for debugging, also on S3
- outputs/bounds_check_signal.json # for debugging, also on S3
- outputs/junit.xml # for debugging, also on S3
+ - outputs/report.json # for debugging, also on S3
+ - outputs/decision_record.md # for posterity, this is appended to final PR comment
when: always
variables:
- SMP_VERSION: 0.18.1
- # At present we require two artifacts to exist for the 'baseline' and the
- # 'comparison'. We are guaranteed by the structure of the pipeline that
- # 'comparison' exists, not so much with 'baseline' as it has to come from main
- # merge pipeline run. This is solved in datadog-agent by updating a file in S3
- # with the SHA of the merge base from main. It's solved in Vector by
- # building Vector twice for each Regression Detector run.
- #
- # We allow failure for now. _Unfortunately_ this also means that if the
- # Regression Detector finds a performance issue with a PR it will not be
- # flagged.
- allow_failure: true
+ SMP_VERSION: 0.18.2
+ # See 'decision_record.md' for the determination of whether this job passes or fails.
+ allow_failure: false
script:
# Ensure output files exist for artifact downloads step
- mkdir outputs # Also needed for smp job sync step
@@ -129,10 +122,65 @@ single-machine-performance-regression_detector:
# uploading JUnit XML, so the upload command below respects that convention.
- DATADOG_API_KEY="$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$AGENT_API_KEY_ORG2" token)" || exit $?; export DATADOG_API_KEY
- datadog-ci junit upload --service datadog-agent outputs/junit.xml
- # Finally, exit 1 if the job signals a regression else 0.
- - RUST_LOG="${RUST_LOG}" ./smp --team-id ${SMP_AGENT_TEAM_ID} --api-base ${SMP_API} --aws-named-profile ${AWS_NAMED_PROFILE}
- job result
- --submission-metadata submission_metadata
+ # Run quality gate check script
+ - |
+ python3 <<'EOF'
+ import json
+ import sys
+
+ try:
+ with open('outputs/report.json') as f:
+ data = json.load(f)
+ except FileNotFoundError:
+ print("Machine readable report not found.")
+ sys.exit(1)
+ except json.JSONDecodeError as e:
+ print(f"Error parsing JSON report: {e}")
+ sys.exit(1)
+
+ experiments = data.get('experiments', {})
+ failed = False
+ decision_record = []
+
+ for exp_name, exp_data in experiments.items():
+ if exp_name.startswith('quality_gate_'):
+ bounds_checks = exp_data.get('bounds_checks', {})
+ for check_name, check_data in bounds_checks.items():
+ results = check_data.get('results', {})
+ comparison = results.get('comparison', [])
+ num_total = len(comparison)
+ failed_replicates = [
+ replicate for replicate in comparison if not replicate.get('passed', False)
+ ]
+ num_failed = len(failed_replicates)
+ num_passed = num_total - num_failed
+ if failed_replicates:
+ decision_record.append(
+ f"- **{exp_name}**, bounds check **{check_name}**: {num_passed}/{num_total} replicas passed. Failed {num_failed} which is > 0. Gate **FAILED**."
+ )
+ failed = True
+ else:
+ decision_record.append(
+ f"- **{exp_name}**, bounds check **{check_name}**: {num_passed}/{num_total} replicas passed. Gate passed."
+ )
+
+ with open('outputs/decision_record.md', 'w') as f:
+ # Extra newline since this is appended to another report
+ f.write('\n\n## CI Pass/Fail Decision\n\n')
+ if failed:
+ f.write('❌ **Failed.** Some Quality Gates were violated.\n\n')
+ f.write('\n'.join(decision_record))
+ else:
+ f.write('✅ **Passed.** All Quality Gates passed.\n\n')
+ f.write('\n'.join(decision_record))
+
+ if failed:
+ print("Quality gate failed, see decision record")
+ sys.exit(1)
+ else:
+ print("Quality gate passed.")
+ sys.exit(0)
+ EOF
# Shamelessly adapted from golang_deps_commenter job config in
# golang_deps_diff.yml at commit 01da274032e510d617161cf4e264a53292f44e55.
@@ -140,10 +188,10 @@ single-machine-performance-regression_detector-pr-comment:
stage: functional_test
rules:
- !reference [.except_main_or_release_branch]
- - when: on_success
+ - when: always
image:
name: "486234852809.dkr.ecr.us-east-1.amazonaws.com/pr-commenter:3"
- entrypoint: [""] # disable entrypoint script for the pr-commenter image
+ entrypoint: [""] # disable entrypoint script for the pr-commenter image
tags: ["arch:amd64"]
needs:
- job: single-machine-performance-regression_detector
@@ -151,11 +199,11 @@ single-machine-performance-regression_detector-pr-comment:
expire_in: 1 weeks
paths:
- report_as_json_string.txt # for debugging transform to valid JSON string
- - pr_comment_payload.json # for debugging PR commenter JSON payload bugs
+ - pr_comment_payload.json # for debugging PR commenter JSON payload bugs
variables:
# Not using the entrypoint script for the pr-commenter image
FF_KUBERNETES_HONOR_ENTRYPOINT: false
- allow_failure: true # allow_failure here should have same setting as in job above
+ allow_failure: true # deliberately kept true (unlike the detector job above, now allow_failure: false): PR commenting is best-effort and must never block the pipeline
script: # ignore error message about no PR, because it happens for dev branches without PRs
# Prevent posting empty Regression Detector report if Markdown report is not found or
# has zero size.
@@ -176,7 +224,8 @@ single-machine-performance-regression_detector-pr-comment:
# to escape double quotes to distinguish literal quotes in the report from
# the double quotes that delimit the value of the "message" field in the
# payload.
- - cat outputs/report.md | sed -z 's/\n/\\n/g' | sed -z 's/"/\\"/g' > report_as_json_string.txt
+ # Appends the Decision Record to final report
+ - cat outputs/report.md outputs/decision_record.md | sed -z 's/\n/\\n/g' | sed -z 's/"/\\"/g' > report_as_json_string.txt
- cat report_as_json_string.txt
# Transforming the Markdown report to a valid JSON string is easy to foul
# up, so to make debugging easier, we store the payload in a variable to
diff --git a/.gitlab/integration_test/otel.yml b/.gitlab/integration_test/otel.yml
index 27289d54b395f..aecb75afd9917 100644
--- a/.gitlab/integration_test/otel.yml
+++ b/.gitlab/integration_test/otel.yml
@@ -36,7 +36,7 @@ docker_image_build_otel:
- cp test/integration/docker/otel_agent_build_tests.py /tmp/otel-ci/
- wget https://github.com/mikefarah/yq/releases/download/3.4.1/yq_linux_amd64 -O
/usr/bin/yq && chmod +x /usr/bin/yq
- - export OTELCOL_VERSION=v$(/usr/bin/yq r /tmp/otel-ci/manifest.yaml dist.otelcol_version)
+ - export OTELCOL_VERSION=v$(/usr/bin/yq r /tmp/otel-ci/manifest.yaml dist.version)
- yq w -i /tmp/otel-ci/manifest.yaml "receivers[+] gomod"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sobjectsreceiver ${OTELCOL_VERSION}"
- yq w -i /tmp/otel-ci/manifest.yaml "processors[+] gomod"
@@ -58,14 +58,14 @@ docker_image_build_otel:
ddflare_extension_ocb_build:
stage: integration_test
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
+ image: registry.ddbuild.io/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
needs: ["go_deps"]
before_script:
- !reference [.retrieve_linux_go_deps]
- mkdir -p /tmp/otel-ci
- cp test/otel/testdata/* /tmp/otel-ci/
- - wget -O /tmp/otel-ci/ocb https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/cmd%2Fbuilder%2Fv0.111.0/ocb_0.111.0_linux_amd64
+ - wget -O /tmp/otel-ci/ocb https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/cmd%2Fbuilder%2Fv0.113.0/ocb_0.113.0_linux_amd64
- chmod +x /tmp/otel-ci/ocb
script:
- echo 'Building collector with OCB and test ddflare extension'
diff --git a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml
index c3b93f90ed13e..f98da3272164d 100644
--- a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml
+++ b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml
@@ -67,8 +67,5 @@ notify-slack:
needs: ["internal_kubernetes_deploy_experimental"]
script:
- export SDM_JWT=$(vault read -field=token identity/oidc/token/sdm)
- # Python 3.12 changes default behavior how packages are installed.
- # In particular, --break-system-packages command line option is
- # required to use the old behavior or use a virtual env. https://github.com/actions/runner-images/issues/8615
- - python3 -m pip install -r tasks/requirements.txt --break-system-packages
+ - python3 -m pip install -r tasks/requirements.txt
- inv pipeline.changelog ${CI_COMMIT_SHORT_SHA} || exit $?
diff --git a/.gitlab/kernel_matrix_testing/common.yml b/.gitlab/kernel_matrix_testing/common.yml
index dd6d1a5abe659..75eb68e927fb1 100644
--- a/.gitlab/kernel_matrix_testing/common.yml
+++ b/.gitlab/kernel_matrix_testing/common.yml
@@ -335,7 +335,7 @@ notify_ebpf_complexity_changes:
# Python 3.12 changes default behavior how packages are installed.
# In particular, --break-system-packages command line option is
# required to use the old behavior or use a virtual env. https://github.com/actions/runner-images/issues/8615
- - python3 -m pip install -r tasks/kernel_matrix_testing/requirements.txt --break-system-packages # Required for printing the tables
+ - python3 -m pip install -r tasks/kernel_matrix_testing/requirements-ci.txt --break-system-packages # Required for printing the tables
- python3 -m pip install -r tasks/libs/requirements-github.txt --break-system-packages
- !reference [.setup_agent_github_app]
- GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_TOKEN read_api) || exit $?; export GITLAB_TOKEN
diff --git a/.gitlab/package_build/linux.yml b/.gitlab/package_build/linux.yml
index 54df0971f046f..5dd7698bb2eda 100644
--- a/.gitlab/package_build/linux.yml
+++ b/.gitlab/package_build/linux.yml
@@ -80,36 +80,6 @@
before_script:
- export RELEASE_VERSION=$RELEASE_VERSION_7
-# Temporary custom agent build test to prevent regression
-# This test will be removed when custom path are used to build macos agent
-# with in-house macos runner builds.
-datadog-agent-7-x64-custom-path-test:
- extends: [.agent_build_x86, .agent_7_build]
- rules:
- - !reference [.except_mergequeue]
- - when: on_success
- stage: package_build
- script:
- - mkdir /custom
- - export CONFIG_DIR="/custom"
- - export INSTALL_DIR="/custom/datadog-agent"
- - !reference [.agent_build_script]
- - ls -la $OMNIBUS_PACKAGE_DIR
- - ls -la $INSTALL_DIR
- - ls -la /custom/etc
- - (ls -la /opt/datadog-agent 2>/dev/null && exit 1) || echo "/opt/datadog-agent has correctly not been generated"
- - (ls -la /etc/datadog-agent 2>/dev/null && exit 1) || echo "/etc/datadog-agent has correctly not been generated"
- variables:
- KUBERNETES_CPU_REQUEST: 16
- KUBERNETES_MEMORY_REQUEST: "32Gi"
- KUBERNETES_MEMORY_LIMIT: "32Gi"
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
- cache:
- - !reference [.cache_omnibus_ruby_deps, cache]
-
# build Agent 7 binaries for x86_64
datadog-agent-7-x64:
extends: [.agent_build_common, .agent_build_x86, .agent_7_build]
diff --git a/.gitlab/source_test/common.yml b/.gitlab/source_test/common.yml
new file mode 100644
index 0000000000000..35ad4187c3333
--- /dev/null
+++ b/.gitlab/source_test/common.yml
@@ -0,0 +1,8 @@
+---
+.upload_junit_source:
+ - $CI_PROJECT_DIR/tools/ci/junit_upload.sh
+
+.upload_coverage:
+ # Upload coverage files to Codecov. Never fail on coverage upload.
+ - CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $CODECOV token) || exit $?; export CODECOV_TOKEN
+ - inv -e coverage.upload-to-codecov $COVERAGE_CACHE_FLAG || true
diff --git a/.gitlab/source_test/include.yml b/.gitlab/source_test/include.yml
index 60666c8d4f5a1..629e88b551294 100644
--- a/.gitlab/source_test/include.yml
+++ b/.gitlab/source_test/include.yml
@@ -4,6 +4,7 @@
# security scans & go.mod checks.
include:
+ - .gitlab/source_test/common.yml # Included first for shared definitions
- .gitlab/source_test/ebpf.yml
- .gitlab/source_test/linux.yml
- .gitlab/source_test/macos.yml
diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml
index 0b7540f16e71a..aea05f28d4345 100644
--- a/.gitlab/source_test/linux.yml
+++ b/.gitlab/source_test/linux.yml
@@ -45,14 +45,6 @@
annotations:
- $EXTERNAL_LINKS_PATH
-.upload_junit_source:
- - $CI_PROJECT_DIR/tools/ci/junit_upload.sh
-
-.upload_coverage:
- # Upload coverage files to Codecov. Never fail on coverage upload.
- - CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $CODECOV token) || exit $?; export CODECOV_TOKEN
- - inv -e coverage.upload-to-codecov $COVERAGE_CACHE_FLAG || true
-
.linux_x64:
image: registry.ddbuild.io/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
diff --git a/.gitlab/source_test/macos.yml b/.gitlab/source_test/macos.yml
index dc8a7ab8f1689..338f2f9eca347 100644
--- a/.gitlab/source_test/macos.yml
+++ b/.gitlab/source_test/macos.yml
@@ -64,18 +64,12 @@ tests_macos:
annotations:
- $EXTERNAL_LINKS_PATH
-.upload_junit_source:
- - $CI_PROJECT_DIR/tools/ci/junit_upload.sh
-
-.upload_coverage:
- # Upload coverage files to Codecov. Never fail on coverage upload.
- - CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $CODECOV_TOKEN) || exit $?; export CODECOV_TOKEN
- - inv -e coverage.upload-to-codecov $COVERAGE_CACHE_FLAG || true
-
tests_macos_gitlab_amd64:
extends: .tests_macos_gitlab
tags: ["macos:monterey-amd64", "specific:true"]
after_script:
+ - !reference [.vault_login]
+ - !reference [.select_python_env_commands]
- !reference [.upload_junit_source]
- !reference [.upload_coverage]
@@ -85,5 +79,7 @@ tests_macos_gitlab_arm64:
!reference [.manual]
tags: ["macos:monterey-arm64", "specific:true"]
after_script:
+ - !reference [.vault_login]
+ - !reference [.select_python_env_commands]
- !reference [.upload_junit_source]
- !reference [.upload_coverage]
diff --git a/.golangci.yml b/.golangci.yml
index 5e6c781919a56..0358a505b0118 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -605,6 +605,8 @@ linters-settings:
desc: "Not really forbidden to use, but it is usually imported by mistake instead of github.com/stretchr/testify/assert"
- pkg: "github.com/tj/assert"
desc: "Not really forbidden to use, but it is usually imported by mistake instead of github.com/stretchr/testify/assert, and confusing since it actually has the behavior of github.com/stretchr/testify/require"
+ - pkg: "debug/elf"
+ desc: "prefer pkg/util/safeelf to prevent panics during parsing"
errcheck:
exclude-functions:
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b68d756cb8d3f..c665daa0c6a33 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -78,7 +78,7 @@ repos:
description: Format .net file of the MSI installer
language: system
# The dotnet format tool requires a solution file to operate.
- entry: dotnet format ./tools/windows/DatadogAgentInstaller --include
+ entry: dotnet format ./tools/windows/DatadogAgentInstaller --include
types: [c#]
- id: go-mod-tidy
name: go-mod-tidy
@@ -141,7 +141,7 @@ repos:
- id: check-go-modules-in-python
name: check-go-modules-in-python
description: Validate all go modules are declared in Invoke tasks
- entry: 'inv modules.validate'
+ entry: 'inv modules.validate --fix-format'
language: system
pass_filenames: false
- files: .*go\.mod$
+ files: (.*go\.mod|modules\.yml|.*gomodules\.py|.*modules\.py)$
diff --git a/.run/Build process-agent.run.xml b/.run/Build process-agent.run.xml
index 83f150ffea675..86a692091e442 100644
--- a/.run/Build process-agent.run.xml
+++ b/.run/Build process-agent.run.xml
@@ -12,7 +12,7 @@
-
+
diff --git a/.run/Build system-probe.run.xml b/.run/Build system-probe.run.xml
index 66fbc5e8112f0..253f0746744ff 100644
--- a/.run/Build system-probe.run.xml
+++ b/.run/Build system-probe.run.xml
@@ -13,7 +13,7 @@
-
+
diff --git a/Dockerfiles/manifests/agent-only/README.md b/Dockerfiles/manifests/agent-only/README.md
index ecabf181d5a80..cfa253d78d9c4 100644
--- a/Dockerfiles/manifests/agent-only/README.md
+++ b/Dockerfiles/manifests/agent-only/README.md
@@ -1,6 +1,6 @@
The kubernetes manifests found in this directory have been automatically generated
from the [helm chart `datadog/datadog`](https://github.com/DataDog/helm-charts/tree/master/charts/datadog)
-version 3.49.6 with the following `values.yaml`:
+version 3.79.0 with the following `values.yaml`:
```yaml
datadog:
@@ -10,4 +10,6 @@ datadog:
socketEnabled: false
processAgent:
enabled: false
+ containerCollection: false
+ processDiscovery: false
```
diff --git a/Dockerfiles/manifests/agent-only/agent-services.yaml b/Dockerfiles/manifests/agent-only/agent-services.yaml
index fe47d43938f14..3231cf2608b78 100644
--- a/Dockerfiles/manifests/agent-only/agent-services.yaml
+++ b/Dockerfiles/manifests/agent-only/agent-services.yaml
@@ -23,7 +23,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
@@ -43,7 +43,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
diff --git a/Dockerfiles/manifests/agent-only/cluster-agent-confd-configmap.yaml b/Dockerfiles/manifests/agent-only/cluster-agent-confd-configmap.yaml
index 06f30e15298f0..9ea7e3c86612e 100644
--- a/Dockerfiles/manifests/agent-only/cluster-agent-confd-configmap.yaml
+++ b/Dockerfiles/manifests/agent-only/cluster-agent-confd-configmap.yaml
@@ -39,3 +39,8 @@ data:
{}
annotations_as_tags:
{}
+ kubernetes_apiserver.yaml: |-
+ init_config:
+ instances:
+ - filtering_enabled: false
+ unbundle_events: false
diff --git a/Dockerfiles/manifests/agent-only/cluster-agent-deployment.yaml b/Dockerfiles/manifests/agent-only/cluster-agent-deployment.yaml
index df0b26121b7ad..4a29f0fc986a1 100644
--- a/Dockerfiles/manifests/agent-only/cluster-agent-deployment.yaml
+++ b/Dockerfiles/manifests/agent-only/cluster-agent-deployment.yaml
@@ -24,6 +24,7 @@ spec:
app.kubernetes.io/instance: "datadog"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: cluster-agent
+ admission.datadoghq.com/enabled: "false"
app: datadog-cluster-agent
name: datadog-cluster-agent
annotations: {}
@@ -32,7 +33,7 @@ spec:
automountServiceAccountToken: true
initContainers:
- name: init-volume
- image: "gcr.io/datadoghq/cluster-agent:7.49.1"
+ image: "gcr.io/datadoghq/cluster-agent:7.59.0"
imagePullPolicy: IfNotPresent
command:
- cp
@@ -45,7 +46,7 @@ spec:
mountPath: /opt/datadog-agent
containers:
- name: cluster-agent
- image: "gcr.io/datadoghq/cluster-agent:7.49.1"
+ image: "gcr.io/datadoghq/cluster-agent:7.59.0"
imagePullPolicy: IfNotPresent
resources: {}
ports:
@@ -73,6 +74,10 @@ spec:
optional: true
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_ADMISSION_CONTROLLER_ENABLED
value: "true"
- name: DD_ADMISSION_CONTROLLER_WEBHOOK_NAME
@@ -89,6 +94,8 @@ spec:
value: "Ignore"
- name: DD_ADMISSION_CONTROLLER_PORT
value: "8000"
+ - name: DD_ADMISSION_CONTROLLER_CONTAINER_REGISTRY
+ value: "gcr.io/datadoghq"
- name: DD_REMOTE_CONFIGURATION_ENABLED
value: "false"
- name: DD_CLUSTER_CHECKS_ENABLED
@@ -109,6 +116,8 @@ spec:
value: datadogtoken
- name: DD_COLLECT_KUBERNETES_EVENTS
value: "true"
+ - name: DD_KUBERNETES_EVENTS_SOURCE_DETECTION_ENABLED
+ value: "false"
- name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME
value: datadog-cluster-agent
- name: DD_CLUSTER_AGENT_AUTH_TOKEN
@@ -130,6 +139,23 @@ spec:
value: "true"
- name: DD_ORCHESTRATOR_EXPLORER_CONTAINER_SCRUBBING_ENABLED
value: "true"
+ - name: DD_CLUSTER_AGENT_LANGUAGE_DETECTION_PATCHER_ENABLED
+ value: "false"
+ - name: DD_INSTRUMENTATION_INSTALL_TIME
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_time
+ - name: DD_INSTRUMENTATION_INSTALL_ID
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_id
+ - name: DD_INSTRUMENTATION_INSTALL_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_type
livenessProbe:
failureThreshold: 6
httpGet:
@@ -150,6 +176,16 @@ spec:
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
+ startupProbe:
+ failureThreshold: 6
+ httpGet:
+ path: /startup
+ port: 5556
+ scheme: HTTP
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
+ timeoutSeconds: 5
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
@@ -188,6 +224,8 @@ spec:
items:
- key: kubernetes_state_core.yaml.default
path: kubernetes_state_core.yaml.default
+ - key: kubernetes_apiserver.yaml
+ path: kubernetes_apiserver.yaml
- name: config
emptyDir: {}
affinity:
diff --git a/Dockerfiles/manifests/agent-only/cluster-agent-rbac.yaml b/Dockerfiles/manifests/agent-only/cluster-agent-rbac.yaml
index 861b8809c541b..7f08178e193e6 100644
--- a/Dockerfiles/manifests/agent-only/cluster-agent-rbac.yaml
+++ b/Dockerfiles/manifests/agent-only/cluster-agent-rbac.yaml
@@ -6,7 +6,7 @@ automountServiceAccountToken: true
metadata:
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
heritage: "Helm"
release: "datadog"
name: datadog-cluster-agent
@@ -28,6 +28,7 @@ rules:
- nodes
- namespaces
- componentstatuses
+ - limitranges
verbs:
- get
- list
@@ -153,6 +154,7 @@ rules:
- networking.k8s.io
resources:
- ingresses
+ - networkpolicies
verbs:
- list
- get
@@ -168,6 +170,14 @@ rules:
- list
- get
- watch
+ - apiGroups:
+ - "storage.k8s.io"
+ resources:
+ - storageclasses
+ verbs:
+ - list
+ - get
+ - watch
- apiGroups:
- autoscaling.k8s.io
resources:
@@ -187,6 +197,7 @@ rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
+ - validatingwebhookconfigurations
- mutatingwebhookconfigurations
resourceNames:
- "datadog-webhook"
@@ -194,6 +205,7 @@ rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
+ - validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs: ["create"]
- apiGroups: ["batch"]
diff --git a/Dockerfiles/manifests/agent-only/daemonset.yaml b/Dockerfiles/manifests/agent-only/daemonset.yaml
index edf2736a96244..df8f4ad9e1377 100644
--- a/Dockerfiles/manifests/agent-only/daemonset.yaml
+++ b/Dockerfiles/manifests/agent-only/daemonset.yaml
@@ -18,6 +18,7 @@ spec:
app.kubernetes.io/instance: "datadog"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: agent
+ admission.datadoghq.com/enabled: "false"
app: datadog
name: datadog
annotations: {}
@@ -27,7 +28,7 @@ spec:
hostPID: true
containers:
- name: agent
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["agent", "run"]
resources: {}
@@ -47,10 +48,26 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
+ - name: DD_PROCESS_CONFIG_PROCESS_COLLECTION_ENABLED
+ value: "false"
+ - name: DD_PROCESS_CONFIG_CONTAINER_COLLECTION_ENABLED
+ value: "false"
+ - name: DD_PROCESS_AGENT_DISCOVERY_ENABLED
+ value: "false"
+ - name: DD_STRIP_PROCESS_ARGS
+ value: "false"
+ - name: DD_PROCESS_CONFIG_RUN_IN_CORE_AGENT_ENABLED
+ value: "false"
- name: DD_LOG_LEVEL
value: "INFO"
- name: DD_DOGSTATSD_PORT
@@ -96,6 +113,8 @@ spec:
value: "false"
- name: DD_CONTAINER_IMAGE_ENABLED
value: "true"
+ - name: DD_KUBELET_CORE_CHECK_ENABLED
+ value: "true"
volumeMounts:
- name: logdatadog
mountPath: /var/log/datadog
@@ -151,9 +170,19 @@ spec:
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
+ startupProbe:
+ failureThreshold: 6
+ httpGet:
+ path: /startup
+ port: 5555
+ scheme: HTTP
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
+ timeoutSeconds: 5
initContainers:
- name: init-volume
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["bash", "-c"]
args:
@@ -164,7 +193,7 @@ spec:
readOnly: false # Need RW for config path
resources: {}
- name: init-config
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command:
- bash
@@ -172,12 +201,12 @@ spec:
args:
- for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done
volumeMounts:
- - name: logdatadog
- mountPath: /var/log/datadog
- readOnly: false # Need RW to write logs
- name: config
mountPath: /etc/datadog-agent
readOnly: false # Need RW for config path
+ - name: logdatadog
+ mountPath: /var/log/datadog
+ readOnly: false # Need RW to write logs
- name: procdir
mountPath: /host/proc
mountPropagation: None
@@ -198,10 +227,16 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
resources: {}
volumes:
- name: auth-token
@@ -215,6 +250,8 @@ spec:
emptyDir: {}
- name: tmpdir
emptyDir: {}
+ - name: s6-run
+ emptyDir: {}
- hostPath:
path: /proc
name: procdir
@@ -228,8 +265,6 @@ spec:
path: /var/run/datadog/
type: DirectoryOrCreate
name: dsdsocket
- - name: s6-run
- emptyDir: {}
- hostPath:
path: /var/run
name: runtimesocketdir
diff --git a/Dockerfiles/manifests/all-containers/README.md b/Dockerfiles/manifests/all-containers/README.md
index ca4327c75cd74..da935d591b688 100644
--- a/Dockerfiles/manifests/all-containers/README.md
+++ b/Dockerfiles/manifests/all-containers/README.md
@@ -1,6 +1,6 @@
The kubernetes manifests found in this directory have been automatically generated
from the [helm chart `datadog/datadog`](https://github.com/DataDog/helm-charts/tree/master/charts/datadog)
-version 3.49.6 with the following `values.yaml`:
+version 3.79.0 with the following `values.yaml`:
```yaml
datadog:
diff --git a/Dockerfiles/manifests/all-containers/agent-services.yaml b/Dockerfiles/manifests/all-containers/agent-services.yaml
index fe47d43938f14..3231cf2608b78 100644
--- a/Dockerfiles/manifests/all-containers/agent-services.yaml
+++ b/Dockerfiles/manifests/all-containers/agent-services.yaml
@@ -23,7 +23,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
@@ -43,7 +43,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
diff --git a/Dockerfiles/manifests/all-containers/cluster-agent-confd-configmap.yaml b/Dockerfiles/manifests/all-containers/cluster-agent-confd-configmap.yaml
index 06f30e15298f0..9ea7e3c86612e 100644
--- a/Dockerfiles/manifests/all-containers/cluster-agent-confd-configmap.yaml
+++ b/Dockerfiles/manifests/all-containers/cluster-agent-confd-configmap.yaml
@@ -39,3 +39,8 @@ data:
{}
annotations_as_tags:
{}
+ kubernetes_apiserver.yaml: |-
+ init_config:
+ instances:
+ - filtering_enabled: false
+ unbundle_events: false
diff --git a/Dockerfiles/manifests/all-containers/cluster-agent-deployment.yaml b/Dockerfiles/manifests/all-containers/cluster-agent-deployment.yaml
index 42051b9bb6603..d94eb1dc98c3e 100644
--- a/Dockerfiles/manifests/all-containers/cluster-agent-deployment.yaml
+++ b/Dockerfiles/manifests/all-containers/cluster-agent-deployment.yaml
@@ -24,6 +24,7 @@ spec:
app.kubernetes.io/instance: "datadog"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: cluster-agent
+ admission.datadoghq.com/enabled: "false"
app: datadog-cluster-agent
name: datadog-cluster-agent
annotations: {}
@@ -32,7 +33,7 @@ spec:
automountServiceAccountToken: true
initContainers:
- name: init-volume
- image: "gcr.io/datadoghq/cluster-agent:7.49.1"
+ image: "gcr.io/datadoghq/cluster-agent:7.59.0"
imagePullPolicy: IfNotPresent
command:
- cp
@@ -45,7 +46,7 @@ spec:
mountPath: /opt/datadog-agent
containers:
- name: cluster-agent
- image: "gcr.io/datadoghq/cluster-agent:7.49.1"
+ image: "gcr.io/datadoghq/cluster-agent:7.59.0"
imagePullPolicy: IfNotPresent
resources: {}
ports:
@@ -73,6 +74,10 @@ spec:
optional: true
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_ADMISSION_CONTROLLER_ENABLED
value: "true"
- name: DD_ADMISSION_CONTROLLER_WEBHOOK_NAME
@@ -89,6 +94,8 @@ spec:
value: "Ignore"
- name: DD_ADMISSION_CONTROLLER_PORT
value: "8000"
+ - name: DD_ADMISSION_CONTROLLER_CONTAINER_REGISTRY
+ value: "gcr.io/datadoghq"
- name: DD_REMOTE_CONFIGURATION_ENABLED
value: "false"
- name: DD_CLUSTER_CHECKS_ENABLED
@@ -109,6 +116,8 @@ spec:
value: datadogtoken
- name: DD_COLLECT_KUBERNETES_EVENTS
value: "true"
+ - name: DD_KUBERNETES_EVENTS_SOURCE_DETECTION_ENABLED
+ value: "false"
- name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME
value: datadog-cluster-agent
- name: DD_CLUSTER_AGENT_AUTH_TOKEN
@@ -130,10 +139,27 @@ spec:
value: "true"
- name: DD_ORCHESTRATOR_EXPLORER_CONTAINER_SCRUBBING_ENABLED
value: "true"
+ - name: DD_CLUSTER_AGENT_LANGUAGE_DETECTION_PATCHER_ENABLED
+ value: "false"
- name: DD_COMPLIANCE_CONFIG_ENABLED
value: "true"
- name: DD_COMPLIANCE_CONFIG_CHECK_INTERVAL
value: "20m"
+ - name: DD_INSTRUMENTATION_INSTALL_TIME
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_time
+ - name: DD_INSTRUMENTATION_INSTALL_ID
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_id
+ - name: DD_INSTRUMENTATION_INSTALL_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_type
livenessProbe:
failureThreshold: 6
httpGet:
@@ -154,6 +180,16 @@ spec:
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
+ startupProbe:
+ failureThreshold: 6
+ httpGet:
+ path: /startup
+ port: 5556
+ scheme: HTTP
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
+ timeoutSeconds: 5
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
@@ -192,6 +228,8 @@ spec:
items:
- key: kubernetes_state_core.yaml.default
path: kubernetes_state_core.yaml.default
+ - key: kubernetes_apiserver.yaml
+ path: kubernetes_apiserver.yaml
- name: config
emptyDir: {}
affinity:
diff --git a/Dockerfiles/manifests/all-containers/cluster-agent-rbac.yaml b/Dockerfiles/manifests/all-containers/cluster-agent-rbac.yaml
index e2249f86a35db..12cde3e157e20 100644
--- a/Dockerfiles/manifests/all-containers/cluster-agent-rbac.yaml
+++ b/Dockerfiles/manifests/all-containers/cluster-agent-rbac.yaml
@@ -6,7 +6,7 @@ automountServiceAccountToken: true
metadata:
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
heritage: "Helm"
release: "datadog"
name: datadog-cluster-agent
@@ -28,6 +28,7 @@ rules:
- nodes
- namespaces
- componentstatuses
+ - limitranges
verbs:
- get
- list
@@ -153,6 +154,7 @@ rules:
- networking.k8s.io
resources:
- ingresses
+ - networkpolicies
verbs:
- list
- get
@@ -168,6 +170,14 @@ rules:
- list
- get
- watch
+ - apiGroups:
+ - "storage.k8s.io"
+ resources:
+ - storageclasses
+ verbs:
+ - list
+ - get
+ - watch
- apiGroups:
- autoscaling.k8s.io
resources:
@@ -187,6 +197,7 @@ rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
+ - validatingwebhookconfigurations
- mutatingwebhookconfigurations
resourceNames:
- "datadog-webhook"
@@ -194,6 +205,7 @@ rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
+ - validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs: ["create"]
- apiGroups: ["batch"]
@@ -209,6 +221,14 @@ rules:
- namespaces
verbs:
- list
+ - apiGroups:
+ - "policy"
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - get
+ - list
+ - watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
diff --git a/Dockerfiles/manifests/all-containers/daemonset.yaml b/Dockerfiles/manifests/all-containers/daemonset.yaml
index e02aaae547a8e..5090788b1cf55 100644
--- a/Dockerfiles/manifests/all-containers/daemonset.yaml
+++ b/Dockerfiles/manifests/all-containers/daemonset.yaml
@@ -18,6 +18,7 @@ spec:
app.kubernetes.io/instance: "datadog"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: agent
+ admission.datadoghq.com/enabled: "false"
app: datadog
name: datadog
annotations:
@@ -28,7 +29,7 @@ spec:
hostPID: true
containers:
- name: agent
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["agent", "run"]
resources: {}
@@ -48,10 +49,26 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
+ - name: DD_PROCESS_CONFIG_PROCESS_COLLECTION_ENABLED
+ value: "false"
+ - name: DD_PROCESS_CONFIG_CONTAINER_COLLECTION_ENABLED
+ value: "true"
+ - name: DD_PROCESS_AGENT_DISCOVERY_ENABLED
+ value: "true"
+ - name: DD_STRIP_PROCESS_ARGS
+ value: "false"
+ - name: DD_PROCESS_CONFIG_RUN_IN_CORE_AGENT_ENABLED
+ value: "false"
- name: DD_LOG_LEVEL
value: "INFO"
- name: DD_DOGSTATSD_PORT
@@ -70,7 +87,7 @@ spec:
name: datadog-cluster-agent
key: token
- name: DD_APM_ENABLED
- value: "false"
+ value: "true"
- name: DD_LOGS_ENABLED
value: "true"
- name: DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL
@@ -97,6 +114,8 @@ spec:
value: "true"
- name: DD_CONTAINER_IMAGE_ENABLED
value: "true"
+ - name: DD_KUBELET_CORE_CHECK_ENABLED
+ value: "true"
volumeMounts:
- name: logdatadog
mountPath: /var/log/datadog
@@ -175,8 +194,18 @@ spec:
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
+ startupProbe:
+ failureThreshold: 6
+ httpGet:
+ path: /startup
+ port: 5555
+ scheme: HTTP
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
+ timeoutSeconds: 5
- name: trace-agent
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["trace-agent", "-config=/etc/datadog-agent/datadog.yaml"]
resources: {}
@@ -197,10 +226,16 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
- name: DD_CLUSTER_AGENT_ENABLED
value: "true"
- name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME
@@ -222,6 +257,21 @@ spec:
value: "/var/run/datadog/apm.socket"
- name: DD_DOGSTATSD_SOCKET
value: "/var/run/datadog/dsd.socket"
+ - name: DD_INSTRUMENTATION_INSTALL_TIME
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_time
+ - name: DD_INSTRUMENTATION_INSTALL_ID
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_id
+ - name: DD_INSTRUMENTATION_INSTALL_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_type
volumeMounts:
- name: config
mountPath: /etc/datadog-agent
@@ -257,7 +307,7 @@ spec:
port: 8126
timeoutSeconds: 5
- name: process-agent
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["process-agent", "--cfgpath=/etc/datadog-agent/datadog.yaml"]
resources: {}
@@ -273,10 +323,16 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
- name: DD_CLUSTER_AGENT_ENABLED
value: "true"
- name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME
@@ -286,8 +342,16 @@ spec:
secretKeyRef:
name: datadog-cluster-agent
key: token
+ - name: DD_PROCESS_CONFIG_PROCESS_COLLECTION_ENABLED
+ value: "false"
+ - name: DD_PROCESS_CONFIG_CONTAINER_COLLECTION_ENABLED
+ value: "true"
- name: DD_PROCESS_AGENT_DISCOVERY_ENABLED
value: "true"
+ - name: DD_STRIP_PROCESS_ARGS
+ value: "false"
+ - name: DD_PROCESS_CONFIG_RUN_IN_CORE_AGENT_ENABLED
+ value: "false"
- name: DD_LOG_LEVEL
value: "INFO"
- name: DD_SYSTEM_PROBE_ENABLED
@@ -340,7 +404,7 @@ spec:
subPath: system-probe.yaml
readOnly: true
- name: system-probe
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
@@ -371,10 +435,16 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
- name: DD_LOG_LEVEL
value: "INFO"
resources: {}
@@ -419,7 +489,7 @@ spec:
mountPath: /host/etc/lsb-release
readOnly: true
- name: security-agent
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
@@ -438,10 +508,16 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
- name: DD_CLUSTER_AGENT_ENABLED
value: "true"
- name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME
@@ -457,6 +533,10 @@ spec:
value: "true"
- name: DD_COMPLIANCE_CONFIG_CHECK_INTERVAL
value: "20m"
+ - name: DD_COMPLIANCE_CONFIG_XCCDF_ENABLED
+ value: "true"
+ - name: DD_COMPLIANCE_CONFIG_HOST_BENCHMARKS_ENABLED
+ value: "true"
- name: HOST_ROOT
value: /host/root
- name: DD_RUNTIME_SECURITY_CONFIG_ENABLED
@@ -465,6 +545,8 @@ spec:
value: "/etc/datadog-agent/runtime-security.d"
- name: DD_RUNTIME_SECURITY_CONFIG_SOCKET
value: /var/run/sysprobe/runtime-security.sock
+ - name: DD_RUNTIME_SECURITY_CONFIG_USE_SECRUNTIME_TRACK
+ value: "true"
- name: DD_DOGSTATSD_SOCKET
value: "/var/run/datadog/dsd.socket"
volumeMounts:
@@ -514,7 +596,7 @@ spec:
readOnly: true
initContainers:
- name: init-volume
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["bash", "-c"]
args:
@@ -525,7 +607,7 @@ spec:
readOnly: false # Need RW for config path
resources: {}
- name: init-config
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command:
- bash
@@ -533,12 +615,12 @@ spec:
args:
- for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done
volumeMounts:
- - name: logdatadog
- mountPath: /var/log/datadog
- readOnly: false # Need RW to write logs
- name: config
mountPath: /etc/datadog-agent
readOnly: false # Need RW for config path
+ - name: logdatadog
+ mountPath: /var/log/datadog
+ readOnly: false # Need RW to write logs
- name: procdir
mountPath: /host/proc
mountPropagation: None
@@ -563,13 +645,19 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
resources: {}
- name: seccomp-setup
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command:
- cp
@@ -596,6 +684,8 @@ spec:
emptyDir: {}
- name: tmpdir
emptyDir: {}
+ - name: s6-run
+ emptyDir: {}
- hostPath:
path: /proc
name: procdir
@@ -614,6 +704,9 @@ spec:
- hostPath:
path: /etc/lsb-release
name: etc-lsb-release
+ - hostPath:
+ path: /etc/system-release
+ name: etc-system-release
- hostPath:
path: /var/run/datadog/
type: DirectoryOrCreate
@@ -622,8 +715,6 @@ spec:
path: /var/run/datadog/
type: DirectoryOrCreate
name: apmsocket
- - name: s6-run
- emptyDir: {}
- name: sysprobe-config
configMap:
name: datadog-system-probe-config
@@ -647,6 +738,9 @@ spec:
- hostPath:
path: /etc/group
name: group
+ - hostPath:
+ path: /var/run
+ name: runtimesocketdir
- hostPath:
path: /var/lib/datadog-agent/logs
name: pointerdir
@@ -659,9 +753,6 @@ spec:
- hostPath:
path: /var/lib/docker/containers
name: logdockercontainerpath
- - hostPath:
- path: /var/run
- name: runtimesocketdir
tolerations:
affinity: {}
serviceAccountName: "datadog"
diff --git a/Dockerfiles/manifests/all-containers/system-probe-configmap.yaml b/Dockerfiles/manifests/all-containers/system-probe-configmap.yaml
index 33f010889518a..1facb353568eb 100644
--- a/Dockerfiles/manifests/all-containers/system-probe-configmap.yaml
+++ b/Dockerfiles/manifests/all-containers/system-probe-configmap.yaml
@@ -7,7 +7,7 @@ metadata:
namespace: default
labels: {}
data:
- system-probe.yaml: "system_probe_config:\n enabled: true\n debug_port: 0\n sysprobe_socket: /var/run/sysprobe/sysprobe.sock\n enable_conntrack: true\n bpf_debug: false\n enable_tcp_queue_length: false\n enable_oom_kill: false\n collect_dns_stats: true\n max_tracked_connections: 131072\n conntrack_max_state_size: 131072\n runtime_compiler_output_dir: /var/tmp/datadog-agent/system-probe/build\n kernel_header_download_dir: /var/tmp/datadog-agent/system-probe/kernel-headers\n apt_config_dir: /host/etc/apt\n yum_repos_dir: /host/etc/yum.repos.d\n zypper_repos_dir: /host/etc/zypp/repos.d\n btf_path: \nnetwork_config:\n enabled: true\n conntrack_init_timeout: 10s\nservice_monitoring_config:\n enabled: false\nruntime_security_config:\n enabled: true\n fim_enabled: false\n socket: /var/run/sysprobe/runtime-security.sock\n policies:\n dir: /etc/datadog-agent/runtime-security.d\n syscall_monitor:\n enabled: false\n network:\n enabled: true\n remote_configuration:\n enabled: true \n activity_dump:\n enabled: true\n traced_cgroups_count: 3\n cgroup_dump_timeout: 20\n cgroup_wait_list_size: 0\n path_merge:\n enabled: false\n\n security_profile:\n enabled: false\n"
+ system-probe.yaml: "system_probe_config:\n enabled: true\n debug_port: 0\n sysprobe_socket: /var/run/sysprobe/sysprobe.sock\n enable_conntrack: true\n bpf_debug: false\n enable_tcp_queue_length: false\n enable_oom_kill: false\n collect_dns_stats: true\n max_tracked_connections: 131072\n conntrack_max_state_size: 131072\n runtime_compiler_output_dir: /var/tmp/datadog-agent/system-probe/build\n kernel_header_download_dir: /var/tmp/datadog-agent/system-probe/kernel-headers\n apt_config_dir: /host/etc/apt\n yum_repos_dir: /host/etc/yum.repos.d\n zypper_repos_dir: /host/etc/zypp/repos.d\n btf_path: \nnetwork_config:\n enabled: true\n conntrack_init_timeout: 10s\nservice_monitoring_config:\n enabled: false\nruntime_security_config:\n enabled: true\n fim_enabled: false\n use_secruntime_track: true\n socket: /var/run/sysprobe/runtime-security.sock\n policies:\n dir: /etc/datadog-agent/runtime-security.d\n syscall_monitor:\n enabled: false\n network:\n enabled: true\n remote_configuration:\n enabled: true \n activity_dump:\n enabled: true\n traced_cgroups_count: 3\n cgroup_dump_timeout: 20\n cgroup_wait_list_size: 0\n path_merge:\n enabled: false\n\n security_profile:\n enabled: true\n anomaly_detection:\n enabled: true\n auto_suppression:\n enabled: true\n"
---
# This file has been generated by `helm template datadog-agent datadog/datadog` from datadog/templates/system-probe-configmap.yaml. Please re-run `generate.sh` rather than modifying this file manually.
apiVersion: v1
diff --git a/Dockerfiles/manifests/cluster-agent-datadogmetrics/README.md b/Dockerfiles/manifests/cluster-agent-datadogmetrics/README.md
index 1b76186d128c3..e38455e0965b0 100644
--- a/Dockerfiles/manifests/cluster-agent-datadogmetrics/README.md
+++ b/Dockerfiles/manifests/cluster-agent-datadogmetrics/README.md
@@ -1,6 +1,6 @@
The kubernetes manifests found in this directory have been automatically generated
from the [helm chart `datadog/datadog`](https://github.com/DataDog/helm-charts/tree/master/charts/datadog)
-version 3.49.6 with the following `values.yaml`:
+version 3.79.0 with the following `values.yaml`:
```yaml
datadog:
@@ -9,6 +9,8 @@ datadog:
socketEnabled: false
processAgent:
enabled: false
+ containerCollection: false
+ processDiscovery: false
clusterAgent:
enabled: true
metricsProvider:
diff --git a/Dockerfiles/manifests/cluster-agent-datadogmetrics/agent-services.yaml b/Dockerfiles/manifests/cluster-agent-datadogmetrics/agent-services.yaml
index 1136d1985fa33..ed2f19fb1c6b6 100644
--- a/Dockerfiles/manifests/cluster-agent-datadogmetrics/agent-services.yaml
+++ b/Dockerfiles/manifests/cluster-agent-datadogmetrics/agent-services.yaml
@@ -23,7 +23,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
@@ -43,7 +43,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
@@ -63,7 +63,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
diff --git a/Dockerfiles/manifests/cluster-agent-datadogmetrics/cluster-agent-confd-configmap.yaml b/Dockerfiles/manifests/cluster-agent-datadogmetrics/cluster-agent-confd-configmap.yaml
index 06f30e15298f0..9ea7e3c86612e 100644
--- a/Dockerfiles/manifests/cluster-agent-datadogmetrics/cluster-agent-confd-configmap.yaml
+++ b/Dockerfiles/manifests/cluster-agent-datadogmetrics/cluster-agent-confd-configmap.yaml
@@ -39,3 +39,8 @@ data:
{}
annotations_as_tags:
{}
+ kubernetes_apiserver.yaml: |-
+ init_config:
+ instances:
+ - filtering_enabled: false
+ unbundle_events: false
diff --git a/Dockerfiles/manifests/cluster-agent-datadogmetrics/cluster-agent-deployment.yaml b/Dockerfiles/manifests/cluster-agent-datadogmetrics/cluster-agent-deployment.yaml
index 8280b025ba2de..35272e03eec5e 100644
--- a/Dockerfiles/manifests/cluster-agent-datadogmetrics/cluster-agent-deployment.yaml
+++ b/Dockerfiles/manifests/cluster-agent-datadogmetrics/cluster-agent-deployment.yaml
@@ -24,6 +24,7 @@ spec:
app.kubernetes.io/instance: "datadog"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: cluster-agent
+ admission.datadoghq.com/enabled: "false"
app: datadog-cluster-agent
name: datadog-cluster-agent
annotations: {}
@@ -32,7 +33,7 @@ spec:
automountServiceAccountToken: true
initContainers:
- name: init-volume
- image: "gcr.io/datadoghq/cluster-agent:7.49.1"
+ image: "gcr.io/datadoghq/cluster-agent:7.59.0"
imagePullPolicy: IfNotPresent
command:
- cp
@@ -45,7 +46,7 @@ spec:
mountPath: /opt/datadog-agent
containers:
- name: cluster-agent
- image: "gcr.io/datadoghq/cluster-agent:7.49.1"
+ image: "gcr.io/datadoghq/cluster-agent:7.59.0"
imagePullPolicy: IfNotPresent
resources: {}
ports:
@@ -76,6 +77,10 @@ spec:
optional: true
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_APP_KEY
valueFrom:
secretKeyRef:
@@ -107,6 +112,8 @@ spec:
value: "Ignore"
- name: DD_ADMISSION_CONTROLLER_PORT
value: "8000"
+ - name: DD_ADMISSION_CONTROLLER_CONTAINER_REGISTRY
+ value: "gcr.io/datadoghq"
- name: DD_REMOTE_CONFIGURATION_ENABLED
value: "false"
- name: DD_CLUSTER_CHECKS_ENABLED
@@ -127,6 +134,8 @@ spec:
value: datadogtoken
- name: DD_COLLECT_KUBERNETES_EVENTS
value: "true"
+ - name: DD_KUBERNETES_EVENTS_SOURCE_DETECTION_ENABLED
+ value: "false"
- name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME
value: datadog-cluster-agent
- name: DD_CLUSTER_AGENT_AUTH_TOKEN
@@ -148,6 +157,23 @@ spec:
value: "true"
- name: DD_ORCHESTRATOR_EXPLORER_CONTAINER_SCRUBBING_ENABLED
value: "true"
+ - name: DD_CLUSTER_AGENT_LANGUAGE_DETECTION_PATCHER_ENABLED
+ value: "false"
+ - name: DD_INSTRUMENTATION_INSTALL_TIME
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_time
+ - name: DD_INSTRUMENTATION_INSTALL_ID
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_id
+ - name: DD_INSTRUMENTATION_INSTALL_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_type
livenessProbe:
failureThreshold: 6
httpGet:
@@ -168,6 +194,16 @@ spec:
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
+ startupProbe:
+ failureThreshold: 6
+ httpGet:
+ path: /startup
+ port: 5556
+ scheme: HTTP
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
+ timeoutSeconds: 5
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
@@ -206,6 +242,8 @@ spec:
items:
- key: kubernetes_state_core.yaml.default
path: kubernetes_state_core.yaml.default
+ - key: kubernetes_apiserver.yaml
+ path: kubernetes_apiserver.yaml
- name: config
emptyDir: {}
affinity:
diff --git a/Dockerfiles/manifests/cluster-agent-datadogmetrics/cluster-agent-rbac.yaml b/Dockerfiles/manifests/cluster-agent-datadogmetrics/cluster-agent-rbac.yaml
index c379f535af000..8776635c9b408 100644
--- a/Dockerfiles/manifests/cluster-agent-datadogmetrics/cluster-agent-rbac.yaml
+++ b/Dockerfiles/manifests/cluster-agent-datadogmetrics/cluster-agent-rbac.yaml
@@ -6,7 +6,7 @@ automountServiceAccountToken: true
metadata:
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
heritage: "Helm"
release: "datadog"
name: datadog-cluster-agent
@@ -28,6 +28,7 @@ rules:
- nodes
- namespaces
- componentstatuses
+ - limitranges
verbs:
- get
- list
@@ -164,6 +165,7 @@ rules:
- networking.k8s.io
resources:
- ingresses
+ - networkpolicies
verbs:
- list
- get
@@ -179,6 +181,14 @@ rules:
- list
- get
- watch
+ - apiGroups:
+ - "storage.k8s.io"
+ resources:
+ - storageclasses
+ verbs:
+ - list
+ - get
+ - watch
- apiGroups:
- autoscaling.k8s.io
resources:
@@ -213,6 +223,7 @@ rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
+ - validatingwebhookconfigurations
- mutatingwebhookconfigurations
resourceNames:
- "datadog-webhook"
@@ -220,6 +231,7 @@ rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
+ - validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs: ["create"]
- apiGroups: ["batch"]
@@ -259,7 +271,7 @@ kind: ClusterRoleBinding
metadata:
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
name: datadog-cluster-agent-system-auth-delegator
diff --git a/Dockerfiles/manifests/cluster-agent-datadogmetrics/daemonset.yaml b/Dockerfiles/manifests/cluster-agent-datadogmetrics/daemonset.yaml
index edf2736a96244..df8f4ad9e1377 100644
--- a/Dockerfiles/manifests/cluster-agent-datadogmetrics/daemonset.yaml
+++ b/Dockerfiles/manifests/cluster-agent-datadogmetrics/daemonset.yaml
@@ -18,6 +18,7 @@ spec:
app.kubernetes.io/instance: "datadog"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: agent
+ admission.datadoghq.com/enabled: "false"
app: datadog
name: datadog
annotations: {}
@@ -27,7 +28,7 @@ spec:
hostPID: true
containers:
- name: agent
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["agent", "run"]
resources: {}
@@ -47,10 +48,26 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
+ - name: DD_PROCESS_CONFIG_PROCESS_COLLECTION_ENABLED
+ value: "false"
+ - name: DD_PROCESS_CONFIG_CONTAINER_COLLECTION_ENABLED
+ value: "false"
+ - name: DD_PROCESS_AGENT_DISCOVERY_ENABLED
+ value: "false"
+ - name: DD_STRIP_PROCESS_ARGS
+ value: "false"
+ - name: DD_PROCESS_CONFIG_RUN_IN_CORE_AGENT_ENABLED
+ value: "false"
- name: DD_LOG_LEVEL
value: "INFO"
- name: DD_DOGSTATSD_PORT
@@ -96,6 +113,8 @@ spec:
value: "false"
- name: DD_CONTAINER_IMAGE_ENABLED
value: "true"
+ - name: DD_KUBELET_CORE_CHECK_ENABLED
+ value: "true"
volumeMounts:
- name: logdatadog
mountPath: /var/log/datadog
@@ -151,9 +170,19 @@ spec:
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
+ startupProbe:
+ failureThreshold: 6
+ httpGet:
+ path: /startup
+ port: 5555
+ scheme: HTTP
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
+ timeoutSeconds: 5
initContainers:
- name: init-volume
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["bash", "-c"]
args:
@@ -164,7 +193,7 @@ spec:
readOnly: false # Need RW for config path
resources: {}
- name: init-config
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command:
- bash
@@ -172,12 +201,12 @@ spec:
args:
- for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done
volumeMounts:
- - name: logdatadog
- mountPath: /var/log/datadog
- readOnly: false # Need RW to write logs
- name: config
mountPath: /etc/datadog-agent
readOnly: false # Need RW for config path
+ - name: logdatadog
+ mountPath: /var/log/datadog
+ readOnly: false # Need RW to write logs
- name: procdir
mountPath: /host/proc
mountPropagation: None
@@ -198,10 +227,16 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
resources: {}
volumes:
- name: auth-token
@@ -215,6 +250,8 @@ spec:
emptyDir: {}
- name: tmpdir
emptyDir: {}
+ - name: s6-run
+ emptyDir: {}
- hostPath:
path: /proc
name: procdir
@@ -228,8 +265,6 @@ spec:
path: /var/run/datadog/
type: DirectoryOrCreate
name: dsdsocket
- - name: s6-run
- emptyDir: {}
- hostPath:
path: /var/run
name: runtimesocketdir
diff --git a/Dockerfiles/manifests/cluster-agent/README.md b/Dockerfiles/manifests/cluster-agent/README.md
index eff3f871c3bf8..666aa347c1b8e 100644
--- a/Dockerfiles/manifests/cluster-agent/README.md
+++ b/Dockerfiles/manifests/cluster-agent/README.md
@@ -1,6 +1,6 @@
The kubernetes manifests found in this directory have been automatically generated
from the [helm chart `datadog/datadog`](https://github.com/DataDog/helm-charts/tree/master/charts/datadog)
-version 3.49.6 with the following `values.yaml`:
+version 3.79.0 with the following `values.yaml`:
```yaml
datadog:
@@ -9,6 +9,8 @@ datadog:
socketEnabled: false
processAgent:
enabled: false
+ containerCollection: false
+ processDiscovery: false
clusterAgent:
enabled: true
metricsProvider:
diff --git a/Dockerfiles/manifests/cluster-agent/agent-services.yaml b/Dockerfiles/manifests/cluster-agent/agent-services.yaml
index 1136d1985fa33..ed2f19fb1c6b6 100644
--- a/Dockerfiles/manifests/cluster-agent/agent-services.yaml
+++ b/Dockerfiles/manifests/cluster-agent/agent-services.yaml
@@ -23,7 +23,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
@@ -43,7 +43,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
@@ -63,7 +63,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
diff --git a/Dockerfiles/manifests/cluster-agent/cluster-agent-confd-configmap.yaml b/Dockerfiles/manifests/cluster-agent/cluster-agent-confd-configmap.yaml
index 06f30e15298f0..9ea7e3c86612e 100644
--- a/Dockerfiles/manifests/cluster-agent/cluster-agent-confd-configmap.yaml
+++ b/Dockerfiles/manifests/cluster-agent/cluster-agent-confd-configmap.yaml
@@ -39,3 +39,8 @@ data:
{}
annotations_as_tags:
{}
+ kubernetes_apiserver.yaml: |-
+ init_config:
+ instances:
+ - filtering_enabled: false
+ unbundle_events: false
diff --git a/Dockerfiles/manifests/cluster-agent/cluster-agent-deployment.yaml b/Dockerfiles/manifests/cluster-agent/cluster-agent-deployment.yaml
index 31fd735f94552..a2ac9fff7100b 100644
--- a/Dockerfiles/manifests/cluster-agent/cluster-agent-deployment.yaml
+++ b/Dockerfiles/manifests/cluster-agent/cluster-agent-deployment.yaml
@@ -24,6 +24,7 @@ spec:
app.kubernetes.io/instance: "datadog"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: cluster-agent
+ admission.datadoghq.com/enabled: "false"
app: datadog-cluster-agent
name: datadog-cluster-agent
annotations: {}
@@ -32,7 +33,7 @@ spec:
automountServiceAccountToken: true
initContainers:
- name: init-volume
- image: "gcr.io/datadoghq/cluster-agent:7.49.1"
+ image: "gcr.io/datadoghq/cluster-agent:7.59.0"
imagePullPolicy: IfNotPresent
command:
- cp
@@ -45,7 +46,7 @@ spec:
mountPath: /opt/datadog-agent
containers:
- name: cluster-agent
- image: "gcr.io/datadoghq/cluster-agent:7.49.1"
+ image: "gcr.io/datadoghq/cluster-agent:7.59.0"
imagePullPolicy: IfNotPresent
resources: {}
ports:
@@ -76,6 +77,10 @@ spec:
optional: true
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_APP_KEY
valueFrom:
secretKeyRef:
@@ -107,6 +112,8 @@ spec:
value: "Ignore"
- name: DD_ADMISSION_CONTROLLER_PORT
value: "8000"
+ - name: DD_ADMISSION_CONTROLLER_CONTAINER_REGISTRY
+ value: "gcr.io/datadoghq"
- name: DD_REMOTE_CONFIGURATION_ENABLED
value: "false"
- name: DD_CLUSTER_CHECKS_ENABLED
@@ -127,6 +134,8 @@ spec:
value: datadogtoken
- name: DD_COLLECT_KUBERNETES_EVENTS
value: "true"
+ - name: DD_KUBERNETES_EVENTS_SOURCE_DETECTION_ENABLED
+ value: "false"
- name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME
value: datadog-cluster-agent
- name: DD_CLUSTER_AGENT_AUTH_TOKEN
@@ -148,6 +157,23 @@ spec:
value: "true"
- name: DD_ORCHESTRATOR_EXPLORER_CONTAINER_SCRUBBING_ENABLED
value: "true"
+ - name: DD_CLUSTER_AGENT_LANGUAGE_DETECTION_PATCHER_ENABLED
+ value: "false"
+ - name: DD_INSTRUMENTATION_INSTALL_TIME
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_time
+ - name: DD_INSTRUMENTATION_INSTALL_ID
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_id
+ - name: DD_INSTRUMENTATION_INSTALL_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_type
livenessProbe:
failureThreshold: 6
httpGet:
@@ -168,6 +194,16 @@ spec:
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
+ startupProbe:
+ failureThreshold: 6
+ httpGet:
+ path: /startup
+ port: 5556
+ scheme: HTTP
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
+ timeoutSeconds: 5
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
@@ -206,6 +242,8 @@ spec:
items:
- key: kubernetes_state_core.yaml.default
path: kubernetes_state_core.yaml.default
+ - key: kubernetes_apiserver.yaml
+ path: kubernetes_apiserver.yaml
- name: config
emptyDir: {}
affinity:
diff --git a/Dockerfiles/manifests/cluster-agent/cluster-agent-rbac.yaml b/Dockerfiles/manifests/cluster-agent/cluster-agent-rbac.yaml
index c1bb2650d7af1..70da4462ac80c 100644
--- a/Dockerfiles/manifests/cluster-agent/cluster-agent-rbac.yaml
+++ b/Dockerfiles/manifests/cluster-agent/cluster-agent-rbac.yaml
@@ -6,7 +6,7 @@ automountServiceAccountToken: true
metadata:
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
heritage: "Helm"
release: "datadog"
name: datadog-cluster-agent
@@ -28,6 +28,7 @@ rules:
- nodes
- namespaces
- componentstatuses
+ - limitranges
verbs:
- get
- list
@@ -164,6 +165,7 @@ rules:
- networking.k8s.io
resources:
- ingresses
+ - networkpolicies
verbs:
- list
- get
@@ -179,6 +181,14 @@ rules:
- list
- get
- watch
+ - apiGroups:
+ - "storage.k8s.io"
+ resources:
+ - storageclasses
+ verbs:
+ - list
+ - get
+ - watch
- apiGroups:
- autoscaling.k8s.io
resources:
@@ -198,6 +208,7 @@ rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
+ - validatingwebhookconfigurations
- mutatingwebhookconfigurations
resourceNames:
- "datadog-webhook"
@@ -205,6 +216,7 @@ rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
+ - validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs: ["create"]
- apiGroups: ["batch"]
@@ -244,7 +256,7 @@ kind: ClusterRoleBinding
metadata:
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
name: datadog-cluster-agent-system-auth-delegator
diff --git a/Dockerfiles/manifests/cluster-agent/daemonset.yaml b/Dockerfiles/manifests/cluster-agent/daemonset.yaml
index edf2736a96244..df8f4ad9e1377 100644
--- a/Dockerfiles/manifests/cluster-agent/daemonset.yaml
+++ b/Dockerfiles/manifests/cluster-agent/daemonset.yaml
@@ -18,6 +18,7 @@ spec:
app.kubernetes.io/instance: "datadog"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: agent
+ admission.datadoghq.com/enabled: "false"
app: datadog
name: datadog
annotations: {}
@@ -27,7 +28,7 @@ spec:
hostPID: true
containers:
- name: agent
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["agent", "run"]
resources: {}
@@ -47,10 +48,26 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
+ - name: DD_PROCESS_CONFIG_PROCESS_COLLECTION_ENABLED
+ value: "false"
+ - name: DD_PROCESS_CONFIG_CONTAINER_COLLECTION_ENABLED
+ value: "false"
+ - name: DD_PROCESS_AGENT_DISCOVERY_ENABLED
+ value: "false"
+ - name: DD_STRIP_PROCESS_ARGS
+ value: "false"
+ - name: DD_PROCESS_CONFIG_RUN_IN_CORE_AGENT_ENABLED
+ value: "false"
- name: DD_LOG_LEVEL
value: "INFO"
- name: DD_DOGSTATSD_PORT
@@ -96,6 +113,8 @@ spec:
value: "false"
- name: DD_CONTAINER_IMAGE_ENABLED
value: "true"
+ - name: DD_KUBELET_CORE_CHECK_ENABLED
+ value: "true"
volumeMounts:
- name: logdatadog
mountPath: /var/log/datadog
@@ -151,9 +170,19 @@ spec:
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
+ startupProbe:
+ failureThreshold: 6
+ httpGet:
+ path: /startup
+ port: 5555
+ scheme: HTTP
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
+ timeoutSeconds: 5
initContainers:
- name: init-volume
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["bash", "-c"]
args:
@@ -164,7 +193,7 @@ spec:
readOnly: false # Need RW for config path
resources: {}
- name: init-config
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command:
- bash
@@ -172,12 +201,12 @@ spec:
args:
- for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done
volumeMounts:
- - name: logdatadog
- mountPath: /var/log/datadog
- readOnly: false # Need RW to write logs
- name: config
mountPath: /etc/datadog-agent
readOnly: false # Need RW for config path
+ - name: logdatadog
+ mountPath: /var/log/datadog
+ readOnly: false # Need RW to write logs
- name: procdir
mountPath: /host/proc
mountPropagation: None
@@ -198,10 +227,16 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
resources: {}
volumes:
- name: auth-token
@@ -215,6 +250,8 @@ spec:
emptyDir: {}
- name: tmpdir
emptyDir: {}
+ - name: s6-run
+ emptyDir: {}
- hostPath:
path: /proc
name: procdir
@@ -228,8 +265,6 @@ spec:
path: /var/run/datadog/
type: DirectoryOrCreate
name: dsdsocket
- - name: s6-run
- emptyDir: {}
- hostPath:
path: /var/run
name: runtimesocketdir
diff --git a/Dockerfiles/manifests/cluster-checks-runners/README.md b/Dockerfiles/manifests/cluster-checks-runners/README.md
index 4c9ed6bee4768..b05431a6a0a4d 100644
--- a/Dockerfiles/manifests/cluster-checks-runners/README.md
+++ b/Dockerfiles/manifests/cluster-checks-runners/README.md
@@ -1,6 +1,6 @@
The kubernetes manifests found in this directory have been automatically generated
from the [helm chart `datadog/datadog`](https://github.com/DataDog/helm-charts/tree/master/charts/datadog)
-version 3.49.6 with the following `values.yaml`:
+version 3.79.0 with the following `values.yaml`:
```yaml
datadog:
@@ -9,6 +9,8 @@ datadog:
socketEnabled: false
processAgent:
enabled: false
+ containerCollection: false
+ processDiscovery: false
clusterChecks:
enabled: true
clusterAgent:
diff --git a/Dockerfiles/manifests/cluster-checks-runners/agent-clusterchecks-deployment.yaml b/Dockerfiles/manifests/cluster-checks-runners/agent-clusterchecks-deployment.yaml
index 4bc1d4cf8574d..6a851fb9924fd 100644
--- a/Dockerfiles/manifests/cluster-checks-runners/agent-clusterchecks-deployment.yaml
+++ b/Dockerfiles/manifests/cluster-checks-runners/agent-clusterchecks-deployment.yaml
@@ -24,6 +24,7 @@ spec:
app.kubernetes.io/instance: "datadog"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: clusterchecks-agent
+ admission.datadoghq.com/enabled: "false"
app: datadog-clusterchecks
name: datadog-clusterchecks
annotations: {}
@@ -33,7 +34,7 @@ spec:
imagePullSecrets: []
initContainers:
- name: init-volume
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["bash", "-c"]
args:
@@ -44,7 +45,7 @@ spec:
readOnly: false # Need RW for writing agent config files
resources: {}
- name: init-config
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["bash", "-c"]
args:
@@ -56,19 +57,19 @@ spec:
resources: {}
containers:
- name: agent
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
command: ["bash", "-c"]
args:
- - rm -rf /etc/datadog-agent/conf.d && touch /etc/datadog-agent/datadog.yaml && exec agent run
+ - find /etc/datadog-agent/conf.d/ -name "*.yaml.default" -type f -delete && touch /etc/datadog-agent/datadog.yaml && exec agent run
imagePullPolicy: IfNotPresent
env:
+ - name: KUBERNETES
+ value: "yes"
- name: DD_API_KEY
valueFrom:
secretKeyRef:
name: "datadog"
key: api-key
- - name: KUBERNETES
- value: "yes"
- name: DD_LOG_LEVEL
value: "INFO"
- name: DD_EXTRA_CONFIG_PROVIDERS
@@ -143,6 +144,16 @@ spec:
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
+ startupProbe:
+ failureThreshold: 6
+ httpGet:
+ path: /startup
+ port: 5557
+ scheme: HTTP
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
+ timeoutSeconds: 5
volumes:
- name: installinfo
configMap:
diff --git a/Dockerfiles/manifests/cluster-checks-runners/agent-clusterchecks-rbac.yaml b/Dockerfiles/manifests/cluster-checks-runners/agent-clusterchecks-rbac.yaml
index e9d0c030b2a0e..2fb7445383555 100644
--- a/Dockerfiles/manifests/cluster-checks-runners/agent-clusterchecks-rbac.yaml
+++ b/Dockerfiles/manifests/cluster-checks-runners/agent-clusterchecks-rbac.yaml
@@ -6,7 +6,7 @@ automountServiceAccountToken: true
metadata:
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
heritage: "Helm"
release: "datadog"
name: datadog-cluster-checks
diff --git a/Dockerfiles/manifests/cluster-checks-runners/agent-services.yaml b/Dockerfiles/manifests/cluster-checks-runners/agent-services.yaml
index 1136d1985fa33..ed2f19fb1c6b6 100644
--- a/Dockerfiles/manifests/cluster-checks-runners/agent-services.yaml
+++ b/Dockerfiles/manifests/cluster-checks-runners/agent-services.yaml
@@ -23,7 +23,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
@@ -43,7 +43,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
@@ -63,7 +63,7 @@ metadata:
namespace: default
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
spec:
diff --git a/Dockerfiles/manifests/cluster-checks-runners/cluster-agent-confd-configmap.yaml b/Dockerfiles/manifests/cluster-checks-runners/cluster-agent-confd-configmap.yaml
index 06f30e15298f0..9ea7e3c86612e 100644
--- a/Dockerfiles/manifests/cluster-checks-runners/cluster-agent-confd-configmap.yaml
+++ b/Dockerfiles/manifests/cluster-checks-runners/cluster-agent-confd-configmap.yaml
@@ -39,3 +39,8 @@ data:
{}
annotations_as_tags:
{}
+ kubernetes_apiserver.yaml: |-
+ init_config:
+ instances:
+ - filtering_enabled: false
+ unbundle_events: false
diff --git a/Dockerfiles/manifests/cluster-checks-runners/cluster-agent-deployment.yaml b/Dockerfiles/manifests/cluster-checks-runners/cluster-agent-deployment.yaml
index 24211475b2abd..308e849746b2c 100644
--- a/Dockerfiles/manifests/cluster-checks-runners/cluster-agent-deployment.yaml
+++ b/Dockerfiles/manifests/cluster-checks-runners/cluster-agent-deployment.yaml
@@ -24,6 +24,7 @@ spec:
app.kubernetes.io/instance: "datadog"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: cluster-agent
+ admission.datadoghq.com/enabled: "false"
app: datadog-cluster-agent
name: datadog-cluster-agent
annotations: {}
@@ -32,7 +33,7 @@ spec:
automountServiceAccountToken: true
initContainers:
- name: init-volume
- image: "gcr.io/datadoghq/cluster-agent:7.49.1"
+ image: "gcr.io/datadoghq/cluster-agent:7.59.0"
imagePullPolicy: IfNotPresent
command:
- cp
@@ -45,7 +46,7 @@ spec:
mountPath: /opt/datadog-agent
containers:
- name: cluster-agent
- image: "gcr.io/datadoghq/cluster-agent:7.49.1"
+ image: "gcr.io/datadoghq/cluster-agent:7.59.0"
imagePullPolicy: IfNotPresent
resources: {}
ports:
@@ -76,6 +77,10 @@ spec:
optional: true
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_APP_KEY
valueFrom:
secretKeyRef:
@@ -107,6 +112,8 @@ spec:
value: "Ignore"
- name: DD_ADMISSION_CONTROLLER_PORT
value: "8000"
+ - name: DD_ADMISSION_CONTROLLER_CONTAINER_REGISTRY
+ value: "gcr.io/datadoghq"
- name: DD_REMOTE_CONFIGURATION_ENABLED
value: "false"
- name: DD_CLUSTER_CHECKS_ENABLED
@@ -129,6 +136,8 @@ spec:
value: datadogtoken
- name: DD_COLLECT_KUBERNETES_EVENTS
value: "true"
+ - name: DD_KUBERNETES_EVENTS_SOURCE_DETECTION_ENABLED
+ value: "false"
- name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME
value: datadog-cluster-agent
- name: DD_CLUSTER_AGENT_AUTH_TOKEN
@@ -150,6 +159,23 @@ spec:
value: "true"
- name: DD_ORCHESTRATOR_EXPLORER_CONTAINER_SCRUBBING_ENABLED
value: "true"
+ - name: DD_CLUSTER_AGENT_LANGUAGE_DETECTION_PATCHER_ENABLED
+ value: "false"
+ - name: DD_INSTRUMENTATION_INSTALL_TIME
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_time
+ - name: DD_INSTRUMENTATION_INSTALL_ID
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_id
+ - name: DD_INSTRUMENTATION_INSTALL_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: datadog-kpi-telemetry-configmap
+ key: install_type
livenessProbe:
failureThreshold: 6
httpGet:
@@ -170,6 +196,16 @@ spec:
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
+ startupProbe:
+ failureThreshold: 6
+ httpGet:
+ path: /startup
+ port: 5556
+ scheme: HTTP
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
+ timeoutSeconds: 5
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
@@ -208,6 +244,8 @@ spec:
items:
- key: kubernetes_state_core.yaml.default
path: kubernetes_state_core.yaml.default
+ - key: kubernetes_apiserver.yaml
+ path: kubernetes_apiserver.yaml
- name: config
emptyDir: {}
affinity:
diff --git a/Dockerfiles/manifests/cluster-checks-runners/cluster-agent-rbac.yaml b/Dockerfiles/manifests/cluster-checks-runners/cluster-agent-rbac.yaml
index c1bb2650d7af1..70da4462ac80c 100644
--- a/Dockerfiles/manifests/cluster-checks-runners/cluster-agent-rbac.yaml
+++ b/Dockerfiles/manifests/cluster-checks-runners/cluster-agent-rbac.yaml
@@ -6,7 +6,7 @@ automountServiceAccountToken: true
metadata:
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
heritage: "Helm"
release: "datadog"
name: datadog-cluster-agent
@@ -28,6 +28,7 @@ rules:
- nodes
- namespaces
- componentstatuses
+ - limitranges
verbs:
- get
- list
@@ -164,6 +165,7 @@ rules:
- networking.k8s.io
resources:
- ingresses
+ - networkpolicies
verbs:
- list
- get
@@ -179,6 +181,14 @@ rules:
- list
- get
- watch
+ - apiGroups:
+ - "storage.k8s.io"
+ resources:
+ - storageclasses
+ verbs:
+ - list
+ - get
+ - watch
- apiGroups:
- autoscaling.k8s.io
resources:
@@ -198,6 +208,7 @@ rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
+ - validatingwebhookconfigurations
- mutatingwebhookconfigurations
resourceNames:
- "datadog-webhook"
@@ -205,6 +216,7 @@ rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
+ - validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs: ["create"]
- apiGroups: ["batch"]
@@ -244,7 +256,7 @@ kind: ClusterRoleBinding
metadata:
labels:
app: "datadog"
- chart: "datadog-3.49.6"
+ chart: "datadog-3.79.0"
release: "datadog"
heritage: "Helm"
name: datadog-cluster-agent-system-auth-delegator
diff --git a/Dockerfiles/manifests/cluster-checks-runners/daemonset.yaml b/Dockerfiles/manifests/cluster-checks-runners/daemonset.yaml
index 69dc3a981eecf..a8a0fd4ca3935 100644
--- a/Dockerfiles/manifests/cluster-checks-runners/daemonset.yaml
+++ b/Dockerfiles/manifests/cluster-checks-runners/daemonset.yaml
@@ -18,6 +18,7 @@ spec:
app.kubernetes.io/instance: "datadog"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: agent
+ admission.datadoghq.com/enabled: "false"
app: datadog
name: datadog
annotations: {}
@@ -27,7 +28,7 @@ spec:
hostPID: true
containers:
- name: agent
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["agent", "run"]
resources: {}
@@ -47,10 +48,26 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
+ - name: DD_PROCESS_CONFIG_PROCESS_COLLECTION_ENABLED
+ value: "false"
+ - name: DD_PROCESS_CONFIG_CONTAINER_COLLECTION_ENABLED
+ value: "false"
+ - name: DD_PROCESS_AGENT_DISCOVERY_ENABLED
+ value: "false"
+ - name: DD_STRIP_PROCESS_ARGS
+ value: "false"
+ - name: DD_PROCESS_CONFIG_RUN_IN_CORE_AGENT_ENABLED
+ value: "false"
- name: DD_LOG_LEVEL
value: "INFO"
- name: DD_DOGSTATSD_PORT
@@ -96,6 +113,8 @@ spec:
value: "false"
- name: DD_CONTAINER_IMAGE_ENABLED
value: "true"
+ - name: DD_KUBELET_CORE_CHECK_ENABLED
+ value: "true"
volumeMounts:
- name: logdatadog
mountPath: /var/log/datadog
@@ -151,9 +170,19 @@ spec:
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
+ startupProbe:
+ failureThreshold: 6
+ httpGet:
+ path: /startup
+ port: 5555
+ scheme: HTTP
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
+ timeoutSeconds: 5
initContainers:
- name: init-volume
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command: ["bash", "-c"]
args:
@@ -164,7 +193,7 @@ spec:
readOnly: false # Need RW for config path
resources: {}
- name: init-config
- image: "gcr.io/datadoghq/agent:7.49.1"
+ image: "gcr.io/datadoghq/agent:7.59.0"
imagePullPolicy: IfNotPresent
command:
- bash
@@ -172,12 +201,12 @@ spec:
args:
- for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done
volumeMounts:
- - name: logdatadog
- mountPath: /var/log/datadog
- readOnly: false # Need RW to write logs
- name: config
mountPath: /etc/datadog-agent
readOnly: false # Need RW for config path
+ - name: logdatadog
+ mountPath: /var/log/datadog
+ readOnly: false # Need RW to write logs
- name: procdir
mountPath: /host/proc
mountPropagation: None
@@ -198,10 +227,16 @@ spec:
value: /etc/datadog-agent/auth/token
- name: KUBERNETES
value: "yes"
+ - name: DD_LANGUAGE_DETECTION_ENABLED
+ value: "false"
+ - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED
+ value: "false"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: DD_OTLP_CONFIG_LOGS_ENABLED
+ value: "false"
resources: {}
volumes:
- name: auth-token
@@ -215,6 +250,8 @@ spec:
emptyDir: {}
- name: tmpdir
emptyDir: {}
+ - name: s6-run
+ emptyDir: {}
- hostPath:
path: /proc
name: procdir
@@ -228,8 +265,6 @@ spec:
path: /var/run/datadog/
type: DirectoryOrCreate
name: dsdsocket
- - name: s6-run
- emptyDir: {}
- hostPath:
path: /var/run
name: runtimesocketdir
diff --git a/Dockerfiles/manifests/generate.sh b/Dockerfiles/manifests/generate.sh
index 18c3e63b4b0ee..cbbf142dfaed7 100755
--- a/Dockerfiles/manifests/generate.sh
+++ b/Dockerfiles/manifests/generate.sh
@@ -24,6 +24,8 @@ datadog:
socketEnabled: false
processAgent:
enabled: false
+ containerCollection: false
+ processDiscovery: false
EOF
cat > "$TMPDIR/values-all-containers.yaml" < remoteAgentIdleTimeout {
+ agentsToRemove = append(agentsToRemove, id)
+ }
+ }
+
+ for _, id := range agentsToRemove {
+ delete(ra.agentMap, id)
+ log.Infof("Remote agent '%s' deregistered after being idle for %s.", id, remoteAgentIdleTimeout)
+ }
+
+ ra.agentMapMu.Unlock()
+ }
+ }
+ }()
+}
+
+func (ra *remoteAgentRegistry) getQueryTimeout() time.Duration {
+ return ra.conf.GetDuration("remote_agent_registry.query_timeout")
+}
+
+// GetRegisteredAgents returns the list of registered remote agents.
+func (ra *remoteAgentRegistry) GetRegisteredAgents() []*remoteagentregistry.RegisteredAgent {
+ ra.agentMapMu.Lock()
+ defer ra.agentMapMu.Unlock()
+
+ agents := make([]*remoteagentregistry.RegisteredAgent, 0, len(ra.agentMap))
+ for _, details := range ra.agentMap {
+ agents = append(agents, &remoteagentregistry.RegisteredAgent{
+ DisplayName: details.displayName,
+ LastSeenUnix: details.lastSeen.Unix(),
+ })
+ }
+
+ return agents
+}
+
+// GetRegisteredAgentStatuses returns the status of all registered remote agents.
+func (ra *remoteAgentRegistry) GetRegisteredAgentStatuses() []*remoteagentregistry.StatusData {
+ queryTimeout := ra.getQueryTimeout()
+
+ ra.agentMapMu.Lock()
+
+ agentsLen := len(ra.agentMap)
+ statusMap := make(map[string]*remoteagentregistry.StatusData, agentsLen)
+ agentStatuses := make([]*remoteagentregistry.StatusData, 0, agentsLen)
+
+ // Return early if we have no registered remote agents.
+ if agentsLen == 0 {
+ ra.agentMapMu.Unlock()
+ return agentStatuses
+ }
+
+ // We preload the status map with a response that indicates timeout, since we want to ensure there's an entry for
+ // every registered remote agent even if we don't get a response back (whether good or bad) from them.
+ for agentID, details := range ra.agentMap {
+ statusMap[agentID] = &remoteagentregistry.StatusData{
+ AgentID: agentID,
+ DisplayName: details.displayName,
+ FailureReason: fmt.Sprintf("Timed out after waiting %s for response.", queryTimeout),
+ }
+ }
+
+ data := make(chan *remoteagentregistry.StatusData, agentsLen)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ for agentID, details := range ra.agentMap {
+ displayName := details.displayName
+
+ go func() {
+ // We push any errors into "failure reason" which ends up getting shown in the status details.
+ resp, err := details.client.GetStatusDetails(ctx, &pb.GetStatusDetailsRequest{}, grpc.WaitForReady(true))
+ if err != nil {
+ data <- &remoteagentregistry.StatusData{
+ AgentID: agentID,
+ DisplayName: displayName,
+ FailureReason: fmt.Sprintf("Failed to query for status: %v", err),
+ }
+ return
+ }
+
+ data <- raproto.ProtobufToStatusData(agentID, displayName, resp)
+ }()
+ }
+
+ ra.agentMapMu.Unlock()
+
+ timeout := time.After(queryTimeout)
+ responsesRemaining := agentsLen
+
+collect:
+ for {
+ select {
+ case statusData := <-data:
+ statusMap[statusData.AgentID] = statusData
+ responsesRemaining--
+ case <-timeout:
+ break collect
+ default:
+ if responsesRemaining == 0 {
+ break collect
+ }
+ }
+ }
+
+ // Migrate the final status data from the map into our slice, for easier consumption.
+ for _, statusData := range statusMap {
+ agentStatuses = append(agentStatuses, statusData)
+ }
+
+ return agentStatuses
+}
+
+func (ra *remoteAgentRegistry) fillFlare(builder flarebuilder.FlareBuilder) error {
+ queryTimeout := ra.getQueryTimeout()
+
+ ra.agentMapMu.Lock()
+
+ agentsLen := len(ra.agentMap)
+ flareMap := make(map[string]*remoteagentregistry.FlareData, agentsLen)
+
+ // Return early if we have no registered remote agents.
+ if agentsLen == 0 {
+ ra.agentMapMu.Unlock()
+ return nil
+ }
+
+ data := make(chan *remoteagentregistry.FlareData, agentsLen)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ for agentID, details := range ra.agentMap {
+ go func() {
+ // We push any errors into "failure reason" which ends up getting shown in the status details.
+ resp, err := details.client.GetFlareFiles(ctx, &pb.GetFlareFilesRequest{}, grpc.WaitForReady(true))
+ if err != nil {
+ log.Warnf("Failed to query remote agent '%s' for flare data: %v", agentID, err)
+ data <- nil
+ return
+ }
+
+ data <- raproto.ProtobufToFlareData(agentID, resp)
+ }()
+ }
+
+ ra.agentMapMu.Unlock()
+
+ timeout := time.After(queryTimeout)
+ responsesRemaining := agentsLen
+
+collect:
+ for {
+ select {
+ case flareData := <-data:
+ flareMap[flareData.AgentID] = flareData
+ responsesRemaining--
+ case <-timeout:
+ break collect
+ default:
+ if responsesRemaining == 0 {
+ break collect
+ }
+ }
+ }
+
+ // We've collected all the flare data we can, so now we add it to the flare builder.
+ for agentID, flareData := range flareMap {
+ if flareData == nil {
+ continue
+ }
+
+ for fileName, fileData := range flareData.Files {
+ err := builder.AddFile(fmt.Sprintf("%s/%s", agentID, util.SanitizeFileName(fileName)), fileData)
+ if err != nil {
+ return fmt.Errorf("failed to add file '%s' from remote agent '%s' to flare: %w", fileName, agentID, err)
+ }
+ }
+ }
+
+ return nil
+}
+
+func newRemoteAgentClient(registration *remoteagentregistry.RegistrationData) (pb.RemoteAgentClient, error) {
+ // NOTE: we're using InsecureSkipVerify because the gRPC server only
+ // persists its TLS certs in memory, and we currently have no
+ // infrastructure to make them available to clients. This is NOT
+ // equivalent to grpc.WithInsecure(), since that assumes a non-TLS
+ // connection.
+ tlsCreds := credentials.NewTLS(&tls.Config{
+ InsecureSkipVerify: true,
+ })
+
+ conn, err := grpc.NewClient(registration.APIEndpoint,
+ grpc.WithTransportCredentials(tlsCreds),
+ grpc.WithPerRPCCredentials(ddgrpc.NewBearerTokenAuth(registration.AuthToken)),
+ // Set on the higher side to account for the fact that flare file data could be larger than the default 4MB limit.
+ grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(64*1024*1024)),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return pb.NewRemoteAgentClient(conn), nil
+}
+
+type remoteAgentDetails struct {
+ lastSeen time.Time
+ displayName string
+ apiEndpoint string
+ client pb.RemoteAgentClient
+}
+
+func newRemoteAgentDetails(registration *remoteagentregistry.RegistrationData) (*remoteAgentDetails, error) {
+ client, err := newRemoteAgentClient(registration)
+ if err != nil {
+ return nil, err
+ }
+
+ return &remoteAgentDetails{
+ displayName: registration.DisplayName,
+ apiEndpoint: registration.APIEndpoint,
+ client: client,
+ lastSeen: time.Now(),
+ }, nil
+}
diff --git a/comp/core/remoteagentregistry/impl/remoteagentregistry_test.go b/comp/core/remoteagentregistry/impl/remoteagentregistry_test.go
new file mode 100644
index 0000000000000..91b198c7a8c9a
--- /dev/null
+++ b/comp/core/remoteagentregistry/impl/remoteagentregistry_test.go
@@ -0,0 +1,291 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+package remoteagentregistryimpl
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+ "testing"
+
+ helpers "github.com/DataDog/datadog-agent/comp/core/flare/helpers"
+ remoteagent "github.com/DataDog/datadog-agent/comp/core/remoteagentregistry/def"
+ compdef "github.com/DataDog/datadog-agent/comp/def"
+ configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
+ configmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core"
+ grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "github.com/DataDog/datadog-agent/pkg/api/security"
+ grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc"
+)
+
+func TestRemoteAgentCreation(t *testing.T) {
+ provides, lc := buildComponent(t)
+
+ assert.NotNil(t, provides.Comp)
+ assert.NotNil(t, provides.FlareProvider)
+ assert.NotNil(t, provides.Status)
+
+ lc.AssertHooksNumber(1)
+
+ ctx := context.Background()
+ assert.NoError(t, lc.Start(ctx))
+ assert.NoError(t, lc.Stop(ctx))
+}
+
+func TestRecommendedRefreshInterval(t *testing.T) {
+ expectedRefreshIntervalSecs := uint32(27)
+ config := configmock.New(t)
+ config.SetWithoutSource("remote_agent_registry.recommended_refresh_interval", fmt.Sprintf("%ds", expectedRefreshIntervalSecs))
+
+ provides, _ := buildComponentWithConfig(t, config)
+ component := provides.Comp
+
+ registrationData := &remoteagent.RegistrationData{
+ AgentID: "test-agent",
+ DisplayName: "Test Agent",
+ APIEndpoint: "localhost:1234",
+ AuthToken: "",
+ }
+
+ actualRefreshIntervalSecs, err := component.RegisterRemoteAgent(registrationData)
+ require.NoError(t, err)
+ require.Equal(t, expectedRefreshIntervalSecs, actualRefreshIntervalSecs)
+
+ agents := component.GetRegisteredAgents()
+ require.Len(t, agents, 1)
+ require.Equal(t, "Test Agent", agents[0].DisplayName)
+}
+
+func TestGetRegisteredAgents(t *testing.T) {
+ provides, _ := buildComponent(t)
+ component := provides.Comp
+
+ registrationData := &remoteagent.RegistrationData{
+ AgentID: "test-agent",
+ DisplayName: "Test Agent",
+ APIEndpoint: "localhost:1234",
+ AuthToken: "",
+ }
+
+ _, err := component.RegisterRemoteAgent(registrationData)
+ require.NoError(t, err)
+
+ agents := component.GetRegisteredAgents()
+ require.Len(t, agents, 1)
+ require.Equal(t, "Test Agent", agents[0].DisplayName)
+}
+
+func TestGetRegisteredAgentStatuses(t *testing.T) {
+ provides, _ := buildComponent(t)
+ component := provides.Comp
+
+ remoteAgentServer := &testRemoteAgentServer{
+ StatusMain: map[string]string{
+ "test_key": "test_value",
+ },
+ }
+
+ server, port := buildRemoteAgentServer(t, remoteAgentServer)
+ defer server.Stop()
+
+ registrationData := &remoteagent.RegistrationData{
+ AgentID: "test-agent",
+ DisplayName: "Test Agent",
+ APIEndpoint: fmt.Sprintf("localhost:%d", port),
+ AuthToken: "testing",
+ }
+
+ _, err := component.RegisterRemoteAgent(registrationData)
+ require.NoError(t, err)
+
+ statuses := component.GetRegisteredAgentStatuses()
+ require.Len(t, statuses, 1)
+ require.Equal(t, "test-agent", statuses[0].AgentID)
+ require.Equal(t, "Test Agent", statuses[0].DisplayName)
+ require.Equal(t, "test_value", statuses[0].MainSection["test_key"])
+}
+
+func TestFlareProvider(t *testing.T) {
+ provides, _ := buildComponent(t)
+ component := provides.Comp
+ flareProvider := provides.FlareProvider
+
+ remoteAgentServer := &testRemoteAgentServer{
+ FlareFiles: map[string][]byte{
+ "test_file.yaml": []byte("test_content"),
+ },
+ }
+
+ server, port := buildRemoteAgentServer(t, remoteAgentServer)
+ defer server.Stop()
+
+ registrationData := &remoteagent.RegistrationData{
+ AgentID: "test-agent",
+ DisplayName: "Test Agent",
+ APIEndpoint: fmt.Sprintf("localhost:%d", port),
+ AuthToken: "testing",
+ }
+
+ _, err := component.RegisterRemoteAgent(registrationData)
+ require.NoError(t, err)
+
+ fb := helpers.NewFlareBuilderMock(t, false)
+ fb.AssertNoFileExists("test-agent/test_file.yaml")
+
+ err = flareProvider.Callback(fb.Fb)
+ require.NoError(t, err)
+ fb.AssertFileExists("test-agent/test_file.yaml")
+ fb.AssertFileContent("test_content", "test-agent/test_file.yaml")
+}
+
+func TestStatusProvider(t *testing.T) {
+ provides, _ := buildComponent(t)
+ component := provides.Comp
+ statusProvider := provides.Status
+
+ remoteAgentServer := &testRemoteAgentServer{
+ StatusMain: map[string]string{
+ "test_key": "test_value",
+ },
+ }
+
+ server, port := buildRemoteAgentServer(t, remoteAgentServer)
+ defer server.Stop()
+
+ registrationData := &remoteagent.RegistrationData{
+ AgentID: "test-agent",
+ DisplayName: "Test Agent",
+ APIEndpoint: fmt.Sprintf("localhost:%d", port),
+ AuthToken: "testing",
+ }
+
+ _, err := component.RegisterRemoteAgent(registrationData)
+ require.NoError(t, err)
+
+ statusData := make(map[string]interface{})
+ err = statusProvider.Provider.JSON(false, statusData)
+ require.NoError(t, err)
+
+ require.Len(t, statusData, 2)
+
+ registeredAgents, ok := statusData["registeredAgents"].([]*remoteagent.RegisteredAgent)
+ if !ok {
+ t.Fatalf("registeredAgents is not a slice of RegisteredAgent")
+ }
+ require.Len(t, registeredAgents, 1)
+ require.Equal(t, "Test Agent", registeredAgents[0].DisplayName)
+
+ registeredAgentStatuses, ok := statusData["registeredAgentStatuses"].([]*remoteagent.StatusData)
+ if !ok {
+ t.Fatalf("registeredAgentStatuses is not a slice of StatusData")
+ }
+ require.Len(t, registeredAgentStatuses, 1)
+ require.Equal(t, "test-agent", registeredAgentStatuses[0].AgentID)
+ require.Equal(t, "Test Agent", registeredAgentStatuses[0].DisplayName)
+ require.Equal(t, "test_value", registeredAgentStatuses[0].MainSection["test_key"])
+}
+
+func buildComponent(t *testing.T) (Provides, *compdef.TestLifecycle) {
+ return buildComponentWithConfig(t, configmock.New(t))
+}
+
+func buildComponentWithConfig(t *testing.T, config configmodel.Config) (Provides, *compdef.TestLifecycle) {
+ lc := compdef.NewTestLifecycle(t)
+ reqs := Requires{
+ Config: config,
+ Lifecycle: lc,
+ }
+
+ return NewComponent(reqs), lc
+}
+
+type testRemoteAgentServer struct {
+ StatusMain map[string]string
+ StatusNamed map[string]map[string]string
+ FlareFiles map[string][]byte
+}
+
+func (t *testRemoteAgentServer) GetStatusDetails(context.Context, *pbgo.GetStatusDetailsRequest) (*pbgo.GetStatusDetailsResponse, error) {
+ namedSections := make(map[string]*pbgo.StatusSection)
+ for name, fields := range t.StatusNamed {
+ namedSections[name] = &pbgo.StatusSection{
+ Fields: fields,
+ }
+ }
+
+ return &pbgo.GetStatusDetailsResponse{
+ MainSection: &pbgo.StatusSection{
+ Fields: t.StatusMain,
+ },
+ NamedSections: namedSections,
+ }, nil
+}
+
+func (t *testRemoteAgentServer) GetFlareFiles(context.Context, *pbgo.GetFlareFilesRequest) (*pbgo.GetFlareFilesResponse, error) {
+ return &pbgo.GetFlareFilesResponse{
+ Files: t.FlareFiles,
+ }, nil
+}
+
+func buildRemoteAgentServer(t *testing.T, remoteAgentServer *testRemoteAgentServer) (*grpc.Server, uint16) {
+ tlsKeyPair, err := buildSelfSignedTLSCertificate()
+ require.NoError(t, err)
+
+ // Make sure we can listen on the intended address.
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ require.NoError(t, err)
+
+ serverOpts := []grpc.ServerOption{
+ grpc.Creds(credentials.NewServerTLSFromCert(tlsKeyPair)),
+ grpc.UnaryInterceptor(grpc_auth.UnaryServerInterceptor(grpcutil.StaticAuthInterceptor("testing"))),
+ }
+
+ server := grpc.NewServer(serverOpts...)
+ pbgo.RegisterRemoteAgentServer(server, remoteAgentServer)
+
+ go func() {
+ err := server.Serve(listener)
+ require.NoError(t, err)
+ }()
+
+ _, portStr, err := net.SplitHostPort(listener.Addr().String())
+ require.NoError(t, err)
+ port, err := strconv.Atoi(portStr)
+ require.NoError(t, err)
+
+ return server, uint16(port)
+}
+
+func buildSelfSignedTLSCertificate() (*tls.Certificate, error) {
+ hosts := []string{"localhost"}
+ _, certPEM, key, err := security.GenerateRootCert(hosts, 2048)
+ if err != nil {
+ return nil, errors.New("unable to generate certificate")
+ }
+
+ // PEM encode the private key
+ keyPEM := pem.EncodeToMemory(&pem.Block{
+ Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key),
+ })
+
+ pair, err := tls.X509KeyPair(certPEM, keyPEM)
+ if err != nil {
+ return nil, fmt.Errorf("unable to generate TLS key pair: %v", err)
+ }
+
+ return &pair, nil
+}
diff --git a/comp/core/remoteagentregistry/mock/mock.go b/comp/core/remoteagentregistry/mock/mock.go
new file mode 100644
index 0000000000000..d930c9669305e
--- /dev/null
+++ b/comp/core/remoteagentregistry/mock/mock.go
@@ -0,0 +1,20 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+//go:build test
+
+// Package mock provides a mock for the remoteagentregistry component
+package mock
+
+import (
+ "testing"
+
+ remoteagentregistry "github.com/DataDog/datadog-agent/comp/core/remoteagentregistry/def"
+)
+
+// Mock returns a mock for remoteagentregistry component.
+func Mock(_ *testing.T) remoteagentregistry.Component {
+ return nil
+}
diff --git a/comp/core/remoteagentregistry/proto/proto.go b/comp/core/remoteagentregistry/proto/proto.go
new file mode 100644
index 0000000000000..a38aa6d5ebce3
--- /dev/null
+++ b/comp/core/remoteagentregistry/proto/proto.go
@@ -0,0 +1,54 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package proto provides functions to convert between protobuf and remoteagent types.
+package proto
+
+import (
+ remoteagentregistry "github.com/DataDog/datadog-agent/comp/core/remoteagentregistry/def"
+ pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core"
+)
+
+// ProtobufToRemoteAgentRegistration converts the protobuf representation of a remote agent registration to the internal type.
+func ProtobufToRemoteAgentRegistration(in *pb.RegisterRemoteAgentRequest) *remoteagentregistry.RegistrationData {
+ return &remoteagentregistry.RegistrationData{
+ AgentID: in.Id,
+ DisplayName: in.DisplayName,
+ APIEndpoint: in.ApiEndpoint,
+ AuthToken: in.AuthToken,
+ }
+}
+
+// ProtobufToFlareData converts the protobuf representation of flare data to the internal type.
+func ProtobufToFlareData(agentID string, resp *pb.GetFlareFilesResponse) *remoteagentregistry.FlareData {
+ return &remoteagentregistry.FlareData{
+ AgentID: agentID,
+ Files: resp.Files,
+ }
+}
+
+// ProtobufToStatusData converts the protobuf representation of status data to the internal type.
+func ProtobufToStatusData(agentID string, displayName string, resp *pb.GetStatusDetailsResponse) *remoteagentregistry.StatusData {
+ return &remoteagentregistry.StatusData{
+ AgentID: agentID,
+ DisplayName: displayName,
+ MainSection: protobufToStatusSection(resp.MainSection),
+ NamedSections: protobufToNamedSections(resp.NamedSections),
+ }
+}
+
+func protobufToStatusSection(statusSection *pb.StatusSection) remoteagentregistry.StatusSection {
+ return statusSection.Fields
+}
+
+func protobufToNamedSections(namedSections map[string]*pb.StatusSection) map[string]remoteagentregistry.StatusSection {
+ sections := make(map[string]remoteagentregistry.StatusSection, len(namedSections))
+
+ for name, section := range namedSections {
+ sections[name] = protobufToStatusSection(section)
+ }
+
+ return sections
+}
diff --git a/comp/core/remoteagentregistry/status/status.go b/comp/core/remoteagentregistry/status/status.go
new file mode 100644
index 0000000000000..dc32a9b2c0d39
--- /dev/null
+++ b/comp/core/remoteagentregistry/status/status.go
@@ -0,0 +1,69 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package status fetches the information needed to render the 'remote agents' section of the status page
+package status
+
+import (
+ "embed"
+ "io"
+
+ remoteagentregistry "github.com/DataDog/datadog-agent/comp/core/remoteagentregistry/def"
+ "github.com/DataDog/datadog-agent/comp/core/status"
+)
+
+// populateStatus populates stats with the registered remote agents and their current statuses
+func populateStatus(registry remoteagentregistry.Component, stats map[string]interface{}) {
+ stats["registeredAgents"] = registry.GetRegisteredAgents()
+ stats["registeredAgentStatuses"] = registry.GetRegisteredAgentStatuses()
+}
+
+//go:embed status_templates
+var templatesFS embed.FS
+
+// Provider provides the functionality to populate the status output
+type Provider struct {
+ registry remoteagentregistry.Component
+}
+
+// GetProvider returns status.Provider
+func GetProvider(registry remoteagentregistry.Component) status.Provider {
+ return Provider{registry: registry}
+}
+
+func (p Provider) getStatusInfo() map[string]interface{} {
+ stats := make(map[string]interface{})
+
+ populateStatus(p.registry, stats)
+
+ return stats
+}
+
+// Name returns the name
+func (p Provider) Name() string {
+ return "Remote Agents"
+}
+
+// Section returns the section
+func (p Provider) Section() string {
+ return "Remote Agents"
+}
+
+// JSON populates the status map
+func (p Provider) JSON(_ bool, stats map[string]interface{}) error {
+ populateStatus(p.registry, stats)
+
+ return nil
+}
+
+// Text renders the text output
+func (p Provider) Text(_ bool, buffer io.Writer) error {
+ return status.RenderText(templatesFS, "remote_agents.tmpl", buffer, p.getStatusInfo())
+}
+
+// HTML renders the html output
+func (p Provider) HTML(_ bool, _ io.Writer) error {
+ return nil
+}
diff --git a/comp/core/remoteagentregistry/status/status_templates/remote_agents.tmpl b/comp/core/remoteagentregistry/status/status_templates/remote_agents.tmpl
new file mode 100644
index 0000000000000..9e7545635e01f
--- /dev/null
+++ b/comp/core/remoteagentregistry/status/status_templates/remote_agents.tmpl
@@ -0,0 +1,38 @@
+{{ if not .registeredAgents }}
+No remote agents registered
+{{ else }}
+{{ len .registeredAgents }} remote agent(s) registered:
+{{- range $agent := .registeredAgents }}
+
+ {{ $agent.DisplayName }}
+ {{ printDashes $agent.DisplayName "-" }}
+ Last seen: {{ formatUnixTime $agent.LastSeenUnix }} ({{ formatUnixTimeSince $agent.LastSeenUnix }})
+{{- end }}
+{{ end }}
+
+{{- with .registeredAgentStatuses }}
+{{- range $agentStatus := . }}
+{{- with $agentStatus }}
+{{ printDashes .DisplayName "=" }}
+{{ .DisplayName }}
+{{ printDashes .DisplayName "=" }}
+{{ if .FailureReason }}
+{{ .FailureReason }}
+{{ end }}
+{{- if .MainSection }}
+{{- range $key, $value := .MainSection }}
+{{ $key }}: {{ $value }}
+{{ end }}
+{{- end }}
+{{- if .NamedSections }}
+{{- range $sectionName, $section := .NamedSections }}
+ {{ $sectionName }}
+ {{ printDashes $sectionName "-" }}
+{{- range $key, $value := $section }}
+ {{ $key }}: {{ $value }}
+{{- end }}
+{{ end }}
+{{- end }}
+{{- end }}
+{{- end }}
+{{- end -}}
diff --git a/comp/core/remoteagentregistry/util/sanitize.go b/comp/core/remoteagentregistry/util/sanitize.go
new file mode 100644
index 0000000000000..417d238cb003a
--- /dev/null
+++ b/comp/core/remoteagentregistry/util/sanitize.go
@@ -0,0 +1,38 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package util provides utility functions for the remoteagent component.
+package util
+
+import (
+ "regexp"
+ "strings"
+)
+
+var agentIDSanitizeRegex = regexp.MustCompile(`[^a-zA-Z0-9-_]`)
+var fileNameSanitizeRegex = regexp.MustCompile(`[^a-zA-Z0-9-_\.]`)
+
+// SanitizeAgentID sanitizes a string to be used as an agent ID.
+//
+// All characters that are not ASCII alphanumerics, underscores, or hyphens are replaced with an underscore, and
+// the string is converted to lowercase.
+func SanitizeAgentID(agentID string) string {
+ agentID = agentIDSanitizeRegex.ReplaceAllString(agentID, "_")
+ return strings.ToLower(agentID)
+}
+
+// SanitizeFileName sanitizes a string to be used as a file name.
+//
+// All characters that are not ASCII alphanumerics, underscores, or hyphens are replaced with an underscore, and the
+// string is trimmed of extraneous whitespace and limited to 255 characters in length.
+func SanitizeFileName(fileName string) string {
+ fileName = fileNameSanitizeRegex.ReplaceAllString(fileName, "_")
+ fileName = strings.TrimSpace(fileName)
+ if len(fileName) > 255 {
+ fileName = fileName[:255]
+ }
+
+ return fileName
+}
diff --git a/comp/core/remoteagentregistry/util/sanitize_test.go b/comp/core/remoteagentregistry/util/sanitize_test.go
new file mode 100644
index 0000000000000..d581ac843e5c9
--- /dev/null
+++ b/comp/core/remoteagentregistry/util/sanitize_test.go
@@ -0,0 +1,94 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2020-present Datadog, Inc.
+
+package util
+
+import "testing"
+
+func TestSanitizeAgentID(t *testing.T) {
+ tests := []struct {
+ name string
+ agentID string
+ expected string
+ }{
+ {
+ name: "empty",
+ agentID: "",
+ expected: "",
+ },
+ {
+ name: "no special characters",
+ agentID: "agentID",
+ expected: "agentid",
+ },
+ {
+ name: "with special characters",
+ agentID: "agentID@123",
+ expected: "agentid_123",
+ },
+ {
+ name: "with spaces",
+ agentID: "agent ID",
+ expected: "agent_id",
+ },
+ {
+ name: "with special characters and spaces",
+ agentID: "agent ID@123",
+ expected: "agent_id_123",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual := SanitizeAgentID(tt.agentID)
+ if actual != tt.expected {
+ t.Errorf("expected: %s, got: %s", tt.expected, actual)
+ }
+ })
+ }
+}
+
+func TestSanitizeFileName(t *testing.T) {
+ tests := []struct {
+ name string
+ fileName string
+ expected string
+ }{
+ {
+ name: "empty",
+ fileName: "",
+ expected: "",
+ },
+ {
+ name: "with spaces",
+ fileName: "my file.log",
+ expected: "my_file.log",
+ },
+ {
+ name: "with special characters",
+ fileName: "fileName@123",
+ expected: "fileName_123",
+ },
+ {
+ name: "with special characters and spaces",
+ fileName: "file Name@123",
+ expected: "file_Name_123",
+ },
+ {
+ name: "with special characters and spaces and long name",
+ fileName: "in-west-philadelphia-born-and-raised-on-the-playground-was-where-i-spent-most-of-my-days-chillin-out-maxin-relaxin-all-cool-and-all-shootin-some-b-ball-outside-of-the-school-when-a-couple-of-guys-who-were-up-to-no-good-started-making-trouble-in-my-neighborhood-i-got-in-one-little-fight-and-my-mom-got-scared-she-said-youre-movin-with-your-auntie-and-uncle-in-bel-air",
+ expected: "in-west-philadelphia-born-and-raised-on-the-playground-was-where-i-spent-most-of-my-days-chillin-out-maxin-relaxin-all-cool-and-all-shootin-some-b-ball-outside-of-the-school-when-a-couple-of-guys-who-were-up-to-no-good-started-making-trouble-in-my-neighbo",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual := SanitizeFileName(tt.fileName)
+ if actual != tt.expected {
+ t.Errorf("expected: %s, got: %s", tt.expected, actual)
+ }
+ })
+ }
+}
diff --git a/comp/core/status/render_helpers.go b/comp/core/status/render_helpers.go
index 69a5db4f1d4b4..7ee0830c50e1b 100644
--- a/comp/core/status/render_helpers.go
+++ b/comp/core/status/render_helpers.go
@@ -39,27 +39,28 @@ var (
func HTMLFmap() htemplate.FuncMap {
htmlFuncOnce.Do(func() {
htmlFuncMap = htemplate.FuncMap{
- "doNotEscape": doNotEscape,
- "lastError": lastError,
- "configError": configError,
- "printDashes": PrintDashes,
- "formatUnixTime": formatUnixTime,
- "humanize": mkHuman,
- "humanizeDuration": mkHumanDuration,
- "toUnsortedList": toUnsortedList,
- "formatTitle": formatTitle,
- "add": add,
- "redText": redText,
- "yellowText": yellowText,
- "greenText": greenText,
- "ntpWarning": ntpWarning,
- "version": getVersion,
- "percent": func(v float64) string { return fmt.Sprintf("%02.1f", v*100) },
- "complianceResult": complianceResult,
- "lastErrorTraceback": lastErrorTracebackHTML,
- "lastErrorMessage": lastErrorMessageHTML,
- "pythonLoaderError": pythonLoaderErrorHTML,
- "status": statusHTML,
+ "doNotEscape": doNotEscape,
+ "lastError": lastError,
+ "configError": configError,
+ "printDashes": PrintDashes,
+ "formatUnixTime": formatUnixTime,
+ "formatUnixTimeSince": formatUnixTimeSince,
+ "humanize": mkHuman,
+ "humanizeDuration": mkHumanDuration,
+ "toUnsortedList": toUnsortedList,
+ "formatTitle": formatTitle,
+ "add": add,
+ "redText": redText,
+ "yellowText": yellowText,
+ "greenText": greenText,
+ "ntpWarning": ntpWarning,
+ "version": getVersion,
+ "percent": func(v float64) string { return fmt.Sprintf("%02.1f", v*100) },
+ "complianceResult": complianceResult,
+ "lastErrorTraceback": lastErrorTracebackHTML,
+ "lastErrorMessage": lastErrorMessageHTML,
+ "pythonLoaderError": pythonLoaderErrorHTML,
+ "status": statusHTML,
}
})
return htmlFuncMap
@@ -69,24 +70,25 @@ func HTMLFmap() htemplate.FuncMap {
func TextFmap() ttemplate.FuncMap {
textFuncOnce.Do(func() {
textFuncMap = ttemplate.FuncMap{
- "lastErrorTraceback": lastErrorTraceback,
- "lastErrorMessage": lastErrorMessage,
- "printDashes": PrintDashes,
- "formatUnixTime": formatUnixTime,
- "formatJSON": formatJSON,
- "humanize": mkHuman,
- "humanizeDuration": mkHumanDuration,
- "toUnsortedList": toUnsortedList,
- "formatTitle": formatTitle,
- "add": add,
- "status": status,
- "redText": redText,
- "yellowText": yellowText,
- "greenText": greenText,
- "ntpWarning": ntpWarning,
- "version": getVersion,
- "percent": func(v float64) string { return fmt.Sprintf("%02.1f", v*100) },
- "complianceResult": complianceResult,
+ "lastErrorTraceback": lastErrorTraceback,
+ "lastErrorMessage": lastErrorMessage,
+ "printDashes": PrintDashes,
+ "formatUnixTime": formatUnixTime,
+ "formatUnixTimeSince": formatUnixTimeSince,
+ "formatJSON": formatJSON,
+ "humanize": mkHuman,
+ "humanizeDuration": mkHumanDuration,
+ "toUnsortedList": toUnsortedList,
+ "formatTitle": formatTitle,
+ "add": add,
+ "status": status,
+ "redText": redText,
+ "yellowText": yellowText,
+ "greenText": greenText,
+ "ntpWarning": ntpWarning,
+ "version": getVersion,
+ "percent": func(v float64) string { return fmt.Sprintf("%02.1f", v*100) },
+ "complianceResult": complianceResult,
}
})
@@ -152,34 +154,59 @@ func lastErrorMessage(value string) string {
}
// formatUnixTime formats the unix time to make it more readable
-func formatUnixTime(unixTime any) string {
- // Initially treat given unixTime is in nanoseconds
- parseFunction := func(value int64) string {
- t := time.Unix(0, value)
- // If year returned 1970, assume unixTime actually in seconds
- if t.Year() == time.Unix(0, 0).Year() {
- t = time.Unix(value, 0)
- }
+func formatUnixTime(rawUnixTime any) string {
+ t, err := parseUnixTime(rawUnixTime)
+ if err != nil {
+ return err.Error()
+ }
- _, tzoffset := t.Zone()
- result := t.Format(timeFormat)
- if tzoffset != 0 {
- result += " / " + t.UTC().Format(timeFormat)
- }
- msec := t.UnixNano() / int64(time.Millisecond)
- result += " (" + strconv.Itoa(int(msec)) + ")"
+ _, tzoffset := t.Zone()
+ result := t.Format(timeFormat)
+ if tzoffset != 0 {
+ result += " / " + t.UTC().Format(timeFormat)
+ }
+ msec := t.UnixNano() / int64(time.Millisecond)
+ result += " (" + strconv.Itoa(int(msec)) + ")"
+
+ return result
+}
- return result
+// formatUnixTimeSince parses a Unix timestamp and calculates the elapsed time between the timestamp and the current
+// time and formats the duration in a human-readable format
+func formatUnixTimeSince(rawUnixTime any) string {
+ t, err := parseUnixTime(rawUnixTime)
+ if err != nil {
+ return err.Error()
}
- switch v := unixTime.(type) {
+ now := time.Now()
+
+ if t.After(now) {
+ delta := t.Sub(now)
+ return fmt.Sprintf("%s from now", delta)
+ }
+
+ delta := now.Sub(t)
+ return fmt.Sprintf("%s ago", delta)
+}
+
+func parseUnixTime(value any) (time.Time, error) {
+ raw := int64(0)
+ switch v := value.(type) {
case int64:
- return parseFunction(v)
+ raw = v
case float64:
- return parseFunction(int64(v))
+ raw = int64(v)
default:
- return fmt.Sprintf("Invalid time parameter %T", v)
+ return time.Time{}, fmt.Errorf("invalid time parameter %T", v)
+ }
+
+ t := time.Unix(0, raw)
+ // If year returned 1970, assume unixTime actually in seconds
+ if t.Year() == time.Unix(0, 0).Year() {
+ t = time.Unix(raw, 0)
}
+ return t, nil
}
// formatJSON formats the given value as JSON. The indent parameter is used to indent the entire JSON output.
diff --git a/comp/core/tagger/taggerimpl/api/getlist.go b/comp/core/tagger/api/getlist.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/api/getlist.go
rename to comp/core/tagger/api/getlist.go
diff --git a/comp/core/tagger/taggerimpl/collectors/doc.go b/comp/core/tagger/collectors/doc.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/collectors/doc.go
rename to comp/core/tagger/collectors/doc.go
diff --git a/comp/core/tagger/taggerimpl/collectors/ecs_common.go b/comp/core/tagger/collectors/ecs_common.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/collectors/ecs_common.go
rename to comp/core/tagger/collectors/ecs_common.go
diff --git a/comp/core/tagger/taggerimpl/collectors/ecs_common_test.go b/comp/core/tagger/collectors/ecs_common_test.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/collectors/ecs_common_test.go
rename to comp/core/tagger/collectors/ecs_common_test.go
diff --git a/comp/core/tagger/taggerimpl/collectors/pod_tag_extractor.go b/comp/core/tagger/collectors/pod_tag_extractor.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/collectors/pod_tag_extractor.go
rename to comp/core/tagger/collectors/pod_tag_extractor.go
diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go b/comp/core/tagger/collectors/workloadmeta_extract.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go
rename to comp/core/tagger/collectors/workloadmeta_extract.go
diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_main.go b/comp/core/tagger/collectors/workloadmeta_main.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/collectors/workloadmeta_main.go
rename to comp/core/tagger/collectors/workloadmeta_main.go
diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go b/comp/core/tagger/collectors/workloadmeta_test.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go
rename to comp/core/tagger/collectors/workloadmeta_test.go
diff --git a/comp/core/tagger/component_mock.go b/comp/core/tagger/component_mock.go
deleted file mode 100644
index 6d52dbf4d51c8..0000000000000
--- a/comp/core/tagger/component_mock.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016-present Datadog, Inc.
-
-//go:build test
-// +build test
-
-package tagger
-
-import "github.com/DataDog/datadog-agent/comp/core/tagger/types"
-
-// Mock implements mock-specific methods for the tagger component.
-type Mock interface {
- Component
-
- // SetTags allows to set tags in the mock fake tagger
- SetTags(entityID types.EntityID, source string, low, orch, high, std []string)
-
- // SetGlobalTags allows to set tags in store for the global entity
- SetGlobalTags(low, orch, high, std []string)
-}
diff --git a/comp/core/tagger/component.go b/comp/core/tagger/def/component.go
similarity index 83%
rename from comp/core/tagger/component.go
rename to comp/core/tagger/def/component.go
index 54e137578dffb..c580325c3bf81 100644
--- a/comp/core/tagger/component.go
+++ b/comp/core/tagger/def/component.go
@@ -3,14 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-// Package tagger implements the Tagger component. The Tagger is the central
-// source of truth for client-side entity tagging. It subscribes to workloadmeta
-// to get updates for all the entity kinds (containers, kubernetes pods,
-// kubernetes nodes, etc.) and extracts the tags for each of them. Tags are then
-// stored in memory (by the TagStore) and can be queried by the tagger.Tag()
-// method.
-
-// Package tagger provides the tagger component for the Datadog Agent
+// Package tagger provides the tagger interface for the Datadog Agent
package tagger
import (
diff --git a/comp/core/tagger/def/params.go b/comp/core/tagger/def/params.go
new file mode 100644
index 0000000000000..2abe12d279fe7
--- /dev/null
+++ b/comp/core/tagger/def/params.go
@@ -0,0 +1,35 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+package tagger
+
+import (
+ "github.com/DataDog/datadog-agent/comp/core/config"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/types"
+)
+
+// RemoteParams provides remote tagger parameters
+type RemoteParams struct {
+ // RemoteFilter is the filter to apply to the remote tagger when streaming tag events
+ RemoteFilter *types.Filter
+ // RemoteTarget function return the target in which the remote tagger will connect
+ // If it returns an error we stop the application
+ RemoteTarget func(config.Component) (string, error)
+ // RemoteTokenFetcher is the function to fetch the token for the remote tagger
+ // If it returns an error the remote tagger will continue to attempt to fetch the token
+ RemoteTokenFetcher func(config.Component) func() (string, error)
+}
+
+// Params provides local tagger parameters
+type Params struct {
+ // UseFakeTagger is a flag to enable the fake tagger. Only use for testing
+ UseFakeTagger bool
+}
+
+// DualParams provides dual tagger parameters
+type DualParams struct {
+ // UseRemote is a function to determine if the remote tagger should be used
+ UseRemote func(config.Component) bool
+}
diff --git a/comp/core/tagger/fx-dual/fx.go b/comp/core/tagger/fx-dual/fx.go
new file mode 100644
index 0000000000000..35fbc6d09dffa
--- /dev/null
+++ b/comp/core/tagger/fx-dual/fx.go
@@ -0,0 +1,28 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+// Package fx provides the fx module for the dual tagger component
+package fx
+
+import (
+ "go.uber.org/fx"
+
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ dualimpl "github.com/DataDog/datadog-agent/comp/core/tagger/impl-dual"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+)
+
+// Module defines the fx options for this component
+func Module(dualParams tagger.DualParams, localParams tagger.Params, remoteParams tagger.RemoteParams) fxutil.Module {
+ return fxutil.Component(
+ fxutil.ProvideComponentConstructor(
+ dualimpl.NewComponent,
+ ),
+ fx.Supply(localParams),
+ fx.Supply(remoteParams),
+ fx.Supply(dualParams),
+ fxutil.ProvideOptional[tagger.Component](),
+ )
+}
diff --git a/comp/core/tagger/fx-noop/fx.go b/comp/core/tagger/fx-noop/fx.go
new file mode 100644
index 0000000000000..df65575eb2c5b
--- /dev/null
+++ b/comp/core/tagger/fx-noop/fx.go
@@ -0,0 +1,23 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+// Package fx provides the fx module for the noop tagger component
+package fx
+
+import (
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ nooptaggerimpl "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+)
+
+// Module defines the fx options for this component
+func Module() fxutil.Module {
+ return fxutil.Component(
+ fxutil.ProvideComponentConstructor(
+ nooptaggerimpl.NewComponent,
+ ),
+ fxutil.ProvideOptional[tagger.Component](),
+ )
+}
diff --git a/comp/core/tagger/fx-remote/fx.go b/comp/core/tagger/fx-remote/fx.go
new file mode 100644
index 0000000000000..156552de72131
--- /dev/null
+++ b/comp/core/tagger/fx-remote/fx.go
@@ -0,0 +1,26 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+// Package fx provides the fx module for the remote tagger component
+package fx
+
+import (
+ "go.uber.org/fx"
+
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ remotetaggerimpl "github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+)
+
+// Module defines the fx options for this component
+func Module(params tagger.RemoteParams) fxutil.Module {
+ return fxutil.Component(
+ fxutil.ProvideComponentConstructor(
+ remotetaggerimpl.NewComponent,
+ ),
+ fx.Supply(params),
+ fxutil.ProvideOptional[tagger.Component](),
+ )
+}
diff --git a/comp/core/tagger/fx/fx.go b/comp/core/tagger/fx/fx.go
new file mode 100644
index 0000000000000..fb6205c87c76b
--- /dev/null
+++ b/comp/core/tagger/fx/fx.go
@@ -0,0 +1,26 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+// Package fx provides the fx module for the tagger component
+package fx
+
+import (
+ "go.uber.org/fx"
+
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ taggerimpl "github.com/DataDog/datadog-agent/comp/core/tagger/impl"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+)
+
+// Module defines the fx options for this component
+func Module(params tagger.Params) fxutil.Module {
+ return fxutil.Component(
+ fxutil.ProvideComponentConstructor(
+ taggerimpl.NewComponent,
+ ),
+ fx.Supply(params),
+ fxutil.ProvideOptional[tagger.Component](),
+ )
+}
diff --git a/comp/core/tagger/taggerimpl/generic_store/composite_store.go b/comp/core/tagger/generic_store/composite_store.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/generic_store/composite_store.go
rename to comp/core/tagger/generic_store/composite_store.go
diff --git a/comp/core/tagger/taggerimpl/generic_store/doc.go b/comp/core/tagger/generic_store/doc.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/generic_store/doc.go
rename to comp/core/tagger/generic_store/doc.go
diff --git a/comp/core/tagger/taggerimpl/generic_store/store.go b/comp/core/tagger/generic_store/store.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/generic_store/store.go
rename to comp/core/tagger/generic_store/store.go
diff --git a/comp/core/tagger/taggerimpl/generic_store/store_test.go b/comp/core/tagger/generic_store/store_test.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/generic_store/store_test.go
rename to comp/core/tagger/generic_store/store_test.go
diff --git a/comp/core/tagger/impl-dual/dual.go b/comp/core/tagger/impl-dual/dual.go
new file mode 100644
index 0000000000000..261eeb6a2d942
--- /dev/null
+++ b/comp/core/tagger/impl-dual/dual.go
@@ -0,0 +1,79 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package dualimpl contains the implementation of the dual tagger.
+// The dualimpl allow clients to use either the remote tagger or the local based on
+// their configuration
+package dualimpl
+
+import (
+ "github.com/DataDog/datadog-agent/comp/core/config"
+ log "github.com/DataDog/datadog-agent/comp/core/log/def"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ local "github.com/DataDog/datadog-agent/comp/core/tagger/impl"
+ remote "github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote"
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
+ compdef "github.com/DataDog/datadog-agent/comp/def"
+)
+
+// Requires contains the dependencies for the dual tagger component
+type Requires struct {
+ Lc compdef.Lifecycle
+ LocalParams tagger.Params
+ RemoteParams tagger.RemoteParams
+ DualParams tagger.DualParams
+ Config config.Component
+ Log log.Component
+ Wmeta workloadmeta.Component
+ Telemetry telemetry.Component
+}
+
+// Provides contains returned values for the dual tagger component
+type Provides struct {
+ local.Provides
+}
+
+// NewComponent returns either a remote tagger or a local tagger based on the configuration
+func NewComponent(req Requires) (Provides, error) {
+ if req.DualParams.UseRemote(req.Config) {
+ remoteRequires := remote.Requires{
+ Lc: req.Lc,
+ Params: req.RemoteParams,
+ Config: req.Config,
+ Log: req.Log,
+ Telemetry: req.Telemetry,
+ }
+
+ provide, err := remote.NewComponent(remoteRequires)
+ if err != nil {
+ return Provides{}, err
+ }
+
+ return Provides{
+ local.Provides{
+ Comp: provide.Comp,
+ },
+ }, nil
+ }
+
+ localRequires := local.Requires{
+ Config: req.Config,
+ Telemetry: req.Telemetry,
+ Wmeta: req.Wmeta,
+ Lc: req.Lc,
+ Log: req.Log,
+ Params: req.LocalParams,
+ }
+ provide, err := local.NewComponent(localRequires)
+
+ if err != nil {
+ return Provides{}, err
+ }
+
+ return Provides{
+ provide,
+ }, nil
+}
diff --git a/comp/core/tagger/noopimpl/tagger.go b/comp/core/tagger/impl-noop/tagger.go
similarity index 86%
rename from comp/core/tagger/noopimpl/tagger.go
rename to comp/core/tagger/impl-noop/tagger.go
index 905082da82469..811a60e905923 100644
--- a/comp/core/tagger/noopimpl/tagger.go
+++ b/comp/core/tagger/impl-noop/tagger.go
@@ -15,28 +15,14 @@ package noopimpl
import (
"context"
- "fmt"
- "go.uber.org/fx"
-
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/telemetry"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types"
"github.com/DataDog/datadog-agent/pkg/tagset"
- "github.com/DataDog/datadog-agent/pkg/util/fxutil"
)
-// Module defines the fx options for this component.
-func Module() fxutil.Module {
- return fxutil.Component(
- fx.Provide(
- NewTaggerClient,
- ),
- )
-
-}
-
type noopTagger struct{}
func (n *noopTagger) Start(context.Context) error {
@@ -80,7 +66,7 @@ func (n *noopTagger) GetEntity(types.EntityID) (*types.Entity, error) {
}
func (n *noopTagger) Subscribe(string, *types.Filter) (types.Subscription, error) {
- return nil, fmt.Errorf("not implemented")
+ return nil, nil
}
func (n *noopTagger) GetEntityHash(types.EntityID, types.TagCardinality) string {
@@ -109,7 +95,7 @@ func (n *noopTagger) DogstatsdCardinality() types.TagCardinality {
return types.LowCardinality
}
-// NewTaggerClient returns a new noop tagger client
-func NewTaggerClient() tagger.Component {
+// NewComponent returns a new noop tagger component
+func NewComponent() tagger.Component {
return &noopTagger{}
}
diff --git a/comp/core/tagger/taggerimpl/remote/tagger.go b/comp/core/tagger/impl-remote/remote.go
similarity index 59%
rename from comp/core/tagger/taggerimpl/remote/tagger.go
rename to comp/core/tagger/impl-remote/remote.go
index ec0ef90d65779..ec1bf27acbc8f 100644
--- a/comp/core/tagger/taggerimpl/remote/tagger.go
+++ b/comp/core/tagger/impl-remote/remote.go
@@ -3,15 +3,14 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-// Package remote implements a remote Tagger.
-package remote
+// Package remotetaggerimpl implements a remote Tagger.
+package remotetaggerimpl
import (
"context"
"crypto/tls"
"fmt"
"net"
- "strings"
"time"
"github.com/cenkalti/backoff"
@@ -23,17 +22,19 @@ import (
"google.golang.org/grpc/metadata"
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ log "github.com/DataDog/datadog-agent/comp/core/log/def"
taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/empty"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/telemetry"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
- "github.com/DataDog/datadog-agent/pkg/api/security"
- "github.com/DataDog/datadog-agent/pkg/config/utils"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/utils"
+ coretelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ compdef "github.com/DataDog/datadog-agent/comp/def"
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core"
+ taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types"
"github.com/DataDog/datadog-agent/pkg/tagset"
+ "github.com/DataDog/datadog-agent/pkg/util/common"
grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc"
- "github.com/DataDog/datadog-agent/pkg/util/log"
)
const (
@@ -43,14 +44,31 @@ const (
var errTaggerStreamNotStarted = errors.New("tagger stream not started")
-// Tagger holds a connection to a remote tagger, processes incoming events from
-// it, and manages the storage of entities to allow querying.
-type Tagger struct {
+// Requires defines the dependencies for the remote tagger.
+type Requires struct {
+ compdef.In
+
+ Lc compdef.Lifecycle
+ Config config.Component
+ Log log.Component
+ Params tagger.RemoteParams
+ Telemetry coretelemetry.Component
+}
+
+// Provides contains the fields provided by the remote tagger constructor.
+type Provides struct {
+ compdef.Out
+
+ Comp tagger.Component
+}
+
+type remoteTagger struct {
store *tagStore
ready bool
options Options
cfg config.Component
+ log log.Component
conn *grpc.ClientConn
client pb.AgentSecureClient
@@ -65,7 +83,9 @@ type Tagger struct {
telemetryTicker *time.Ticker
telemetryStore *telemetry.Store
- empty.Tagger
+
+ checksCardinality types.TagCardinality
+ dogstatsdCardinality types.TagCardinality
}
// Options contains the options needed to configure the remote tagger.
@@ -75,59 +95,69 @@ type Options struct {
Disabled bool
}
-// NodeAgentOptions returns the tagger options used in the node agent.
-func NodeAgentOptions(config config.Component) (Options, error) {
- return Options{
- Target: fmt.Sprintf(":%v", config.GetInt("cmd_port")),
- TokenFetcher: func() (string, error) { return security.FetchAuthToken(config) },
- }, nil
-}
+// NewComponent returns a remote tagger
+func NewComponent(req Requires) (Provides, error) {
+ remoteTagger, err := NewRemoteTagger(req.Params, req.Config, req.Log, req.Telemetry)
+
+ if err != nil {
+ return Provides{}, err
+ }
-// NodeAgentOptionsForSecurityResolvers is a legacy function that returns the
-// same options as NodeAgentOptions, but it's used by the tag security resolvers only
-// TODO (component): remove this function once the security resolver migrates to component
-func NodeAgentOptionsForSecurityResolvers(cfg config.Component) (Options, error) {
- return Options{
- Target: fmt.Sprintf(":%v", cfg.GetInt("cmd_port")),
- TokenFetcher: func() (string, error) { return security.FetchAuthToken(cfg) },
+ req.Lc.Append(compdef.Hook{OnStart: func(_ context.Context) error {
+ mainCtx, _ := common.GetMainCtxCancel()
+ return remoteTagger.Start(mainCtx)
+ }})
+ req.Lc.Append(compdef.Hook{OnStop: func(context.Context) error {
+ return remoteTagger.Stop()
+ }})
+
+ return Provides{
+ Comp: remoteTagger,
}, nil
}
-// CLCRunnerOptions returns the tagger options used in the CLC Runner.
-func CLCRunnerOptions(config config.Component) (Options, error) {
- opts := Options{
- Disabled: !config.GetBool("clc_runner_remote_tagger_enabled"),
- }
-
- if !opts.Disabled {
- target, err := utils.GetClusterAgentEndpoint()
- if err != nil {
- return opts, fmt.Errorf("unable to get cluster agent endpoint: %w", err)
- }
- // gRPC targets do not have a protocol. the DCA endpoint is always HTTPS,
- // so a simple `TrimPrefix` is enough.
- opts.Target = strings.TrimPrefix(target, "https://")
- opts.TokenFetcher = func() (string, error) { return security.GetClusterAgentAuthToken(config) }
+// NewRemoteTagger creates a new remote tagger.
+// TODO: (components) remove once we pass the remote tagger instance to pkg/security/resolvers/tags/resolver.go
+func NewRemoteTagger(params tagger.RemoteParams, cfg config.Component, log log.Component, telemetryComp coretelemetry.Component) (tagger.Component, error) {
+ telemetryStore := telemetry.NewStore(telemetryComp)
+ target, err := params.RemoteTarget(cfg)
+ if err != nil {
+ return nil, err
}
- return opts, nil
-}
-// NewTagger returns an allocated tagger. You still have to run Init()
-// once the config package is ready.
-func NewTagger(options Options, cfg config.Component, telemetryStore *telemetry.Store, filter *types.Filter) *Tagger {
- return &Tagger{
- options: options,
+ remotetagger := &remoteTagger{
+ options: Options{
+ Target: target,
+ TokenFetcher: params.RemoteTokenFetcher(cfg),
+ },
cfg: cfg,
store: newTagStore(cfg, telemetryStore),
telemetryStore: telemetryStore,
- filter: filter,
+ filter: params.RemoteFilter,
+ log: log,
+ }
+
+ checkCard := cfg.GetString("checks_tag_cardinality")
+ dsdCard := cfg.GetString("dogstatsd_tag_cardinality")
+ remotetagger.checksCardinality, err = types.StringToTagCardinality(checkCard)
+ if err != nil {
+ log.Warnf("failed to parse check tag cardinality, defaulting to low. Error: %s", err)
+ remotetagger.checksCardinality = types.LowCardinality
}
+
+ remotetagger.dogstatsdCardinality, err = types.StringToTagCardinality(dsdCard)
+ if err != nil {
+ log.Warnf("failed to parse dogstatsd tag cardinality, defaulting to low. Error: %s", err)
+ remotetagger.dogstatsdCardinality = types.LowCardinality
+ }
+
+ return remotetagger, nil
}
// Start creates the connection to the remote tagger and starts watching for
// events.
-func (t *Tagger) Start(ctx context.Context) error {
+func (t *remoteTagger) Start(ctx context.Context) error {
t.telemetryTicker = time.NewTicker(1 * time.Minute)
t.ctx, t.cancel = context.WithCancel(ctx)
@@ -166,7 +196,7 @@ func (t *Tagger) Start(ctx context.Context) error {
return err
}
- log.Info("remote tagger initialized successfully")
+ t.log.Info("remote tagger initialized successfully")
go t.run()
@@ -174,7 +204,7 @@ func (t *Tagger) Start(ctx context.Context) error {
}
// Stop closes the connection to the remote tagger and stops event collection.
-func (t *Tagger) Stop() error {
+func (t *remoteTagger) Stop() error {
t.cancel()
err := t.conn.Close()
@@ -184,24 +214,24 @@ func (t *Tagger) Stop() error {
t.telemetryTicker.Stop()
- log.Info("remote tagger stopped successfully")
+ t.log.Info("remote tagger stopped successfully")
return nil
}
// ReplayTagger returns the replay tagger instance
// This is a no-op for the remote tagger
-func (t *Tagger) ReplayTagger() tagger.ReplayTagger {
+func (t *remoteTagger) ReplayTagger() tagger.ReplayTagger {
return nil
}
// GetTaggerTelemetryStore returns tagger telemetry store
-func (t *Tagger) GetTaggerTelemetryStore() *telemetry.Store {
+func (t *remoteTagger) GetTaggerTelemetryStore() *telemetry.Store {
return t.telemetryStore
}
// Tag returns tags for a given entity at the desired cardinality.
-func (t *Tagger) Tag(entityID types.EntityID, cardinality types.TagCardinality) ([]string, error) {
+func (t *remoteTagger) Tag(entityID types.EntityID, cardinality types.TagCardinality) ([]string, error) {
entity := t.store.getEntity(entityID)
if entity != nil {
t.telemetryStore.QueriesByCardinality(cardinality).Success.Inc()
@@ -217,7 +247,7 @@ func (t *Tagger) Tag(entityID types.EntityID, cardinality types.TagCardinality)
// If possible, avoid using this function, and use the Tag method instead.
// This function exists in order not to break backward compatibility with rtloader and python
// integrations using the tagger
-func (t *Tagger) LegacyTag(entity string, cardinality types.TagCardinality) ([]string, error) {
+func (t *remoteTagger) LegacyTag(entity string, cardinality types.TagCardinality) ([]string, error) {
prefix, id, err := taggercommon.ExtractPrefixAndID(entity)
if err != nil {
return nil, err
@@ -228,7 +258,7 @@ func (t *Tagger) LegacyTag(entity string, cardinality types.TagCardinality) ([]s
}
// AccumulateTagsFor returns tags for a given entity at the desired cardinality.
-func (t *Tagger) AccumulateTagsFor(entityID types.EntityID, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error {
+func (t *remoteTagger) AccumulateTagsFor(entityID types.EntityID, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error {
tags, err := t.Tag(entityID, cardinality)
if err != nil {
return err
@@ -238,7 +268,7 @@ func (t *Tagger) AccumulateTagsFor(entityID types.EntityID, cardinality types.Ta
}
// Standard returns the standard tags for a given entity.
-func (t *Tagger) Standard(entityID types.EntityID) ([]string, error) {
+func (t *remoteTagger) Standard(entityID types.EntityID) ([]string, error) {
entity := t.store.getEntity(entityID)
if entity == nil {
return []string{}, nil
@@ -248,7 +278,7 @@ func (t *Tagger) Standard(entityID types.EntityID) ([]string, error) {
}
// GetEntity returns the entity corresponding to the specified id and an error
-func (t *Tagger) GetEntity(entityID types.EntityID) (*types.Entity, error) {
+func (t *remoteTagger) GetEntity(entityID types.EntityID) (*types.Entity, error) {
entity := t.store.getEntity(entityID)
if entity == nil {
return nil, fmt.Errorf("Entity not found for entityID")
@@ -258,7 +288,7 @@ func (t *Tagger) GetEntity(entityID types.EntityID) (*types.Entity, error) {
}
// List returns all the entities currently stored by the tagger.
-func (t *Tagger) List() types.TaggerListResponse {
+func (t *remoteTagger) List() types.TaggerListResponse {
entities := t.store.listEntities()
resp := types.TaggerListResponse{
Entities: make(map[string]types.TaggerListEntity),
@@ -275,14 +305,63 @@ func (t *Tagger) List() types.TaggerListResponse {
return resp
}
+// GetEntityHash returns the hash for the tags associated with the given entity
+// Returns an empty string if the tags lookup fails
+func (t *remoteTagger) GetEntityHash(entityID types.EntityID, cardinality types.TagCardinality) string {
+ tags, err := t.Tag(entityID, cardinality)
+ if err != nil {
+ return ""
+ }
+ return utils.ComputeTagsHash(tags)
+}
+
+// AgentTags is a no-op in the remote tagger.
+// Agents using the remote tagger are not supposed to rely on this function,
+// because to get the container ID where the agent is running we'd need to
+// introduce some dependencies that we don't want to have in the remote
+// tagger.
+// The only user of this function that uses the remote tagger is the cluster
+// check runner, but it gets its tags from the cluster-agent which doesn't
+// store tags for containers. So this function is a no-op.
+func (t *remoteTagger) AgentTags(_ types.TagCardinality) ([]string, error) {
+ return nil, nil
+}
+
+func (t *remoteTagger) GlobalTags(cardinality types.TagCardinality) ([]string, error) {
+ return t.Tag(taggercommon.GetGlobalEntityID(), cardinality)
+}
+
+func (t *remoteTagger) SetNewCaptureTagger(tagger.Component) {}
+
+func (t *remoteTagger) ResetCaptureTagger() {}
+
+// EnrichTags enriches the tags with the global tags.
+// Agents running the remote tagger don't have the ability to enrich tags based
+// on the origin info. Only the core agent or dogstatsd can have origin info,
+// and they always use the local tagger.
+// This function can only add the global tags.
+func (t *remoteTagger) EnrichTags(tb tagset.TagsAccumulator, _ taggertypes.OriginInfo) {
+ if err := t.AccumulateTagsFor(taggercommon.GetGlobalEntityID(), t.dogstatsdCardinality, tb); err != nil {
+ t.log.Error(err.Error())
+ }
+}
+
+func (t *remoteTagger) ChecksCardinality() types.TagCardinality {
+ return t.checksCardinality
+}
+
+func (t *remoteTagger) DogstatsdCardinality() types.TagCardinality {
+ return t.dogstatsdCardinality
+}
+
// Subscribe returns a channel that receives a slice of events whenever an entity is
// added, modified or deleted. It can send an initial burst of events only to the new
// subscriber, without notifying all of the others.
-func (t *Tagger) Subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) {
+func (t *remoteTagger) Subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) {
return t.store.subscribe(subscriptionID, filter)
}
-func (t *Tagger) run() {
+func (t *remoteTagger) run() {
for {
select {
case <-t.telemetryTicker.C:
@@ -295,7 +374,7 @@ func (t *Tagger) run() {
if t.stream == nil {
if err := t.startTaggerStream(noTimeout); err != nil {
- log.Warnf("error received trying to start stream with target %q: %s", t.options.Target, err)
+ t.log.Warnf("error received trying to start stream with target %q: %s", t.options.Target, err)
continue
}
}
@@ -319,7 +398,7 @@ func (t *Tagger) run() {
t.ready = false
t.stream = nil
- log.Warnf("error received from remote tagger: %s", err)
+ t.log.Warnf("error received from remote tagger: %s", err)
continue
}
@@ -328,13 +407,13 @@ func (t *Tagger) run() {
err = t.processResponse(response)
if err != nil {
- log.Warnf("error processing event received from remote tagger: %s", err)
+ t.log.Warnf("error processing event received from remote tagger: %s", err)
continue
}
}
}
-func (t *Tagger) processResponse(response *pb.StreamTagsResponse) error {
+func (t *remoteTagger) processResponse(response *pb.StreamTagsResponse) error {
// returning early when there are no events prevents a keep-alive sent
// from the core agent from wiping the store clean in case the remote
// tagger was previously in an unready (but filled) state.
@@ -346,7 +425,7 @@ func (t *Tagger) processResponse(response *pb.StreamTagsResponse) error {
for _, ev := range response.Events {
eventType, err := convertEventType(ev.Type)
if err != nil {
- log.Warnf("error processing event received from remote tagger: %s", err)
+ t.log.Warnf("error processing event received from remote tagger: %s", err)
continue
}
@@ -382,7 +461,7 @@ func (t *Tagger) processResponse(response *pb.StreamTagsResponse) error {
// Since the entire remote tagger really depends on this working, it'll keep on
// retrying with an exponential backoff until maxElapsed (or forever if
// maxElapsed == 0) or the tagger is stopped.
-func (t *Tagger) startTaggerStream(maxElapsed time.Duration) error {
+func (t *remoteTagger) startTaggerStream(maxElapsed time.Duration) error {
expBackoff := backoff.NewExponentialBackOff()
expBackoff.InitialInterval = 500 * time.Millisecond
expBackoff.MaxInterval = 5 * time.Minute
@@ -397,7 +476,7 @@ func (t *Tagger) startTaggerStream(maxElapsed time.Duration) error {
token, err := t.options.TokenFetcher()
if err != nil {
- log.Infof("unable to fetch auth token, will possibly retry: %s", err)
+ t.log.Infof("unable to fetch auth token, will possibly retry: %s", err)
return err
}
@@ -418,11 +497,11 @@ func (t *Tagger) startTaggerStream(maxElapsed time.Duration) error {
Prefixes: prefixes,
})
if err != nil {
- log.Infof("unable to establish stream, will possibly retry: %s", err)
+ t.log.Infof("unable to establish stream, will possibly retry: %s", err)
return err
}
- log.Info("tagger stream established successfully")
+ t.log.Info("tagger stream established successfully")
return nil
}, expBackoff)
diff --git a/comp/core/tagger/taggerimpl/remote/tagstore.go b/comp/core/tagger/impl-remote/tagstore.go
similarity index 97%
rename from comp/core/tagger/taggerimpl/remote/tagstore.go
rename to comp/core/tagger/impl-remote/tagstore.go
index 805c13c2d47f1..9f5553200604c 100644
--- a/comp/core/tagger/taggerimpl/remote/tagstore.go
+++ b/comp/core/tagger/impl-remote/tagstore.go
@@ -3,14 +3,14 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-package remote
+package remotetaggerimpl
import (
"sync"
"github.com/DataDog/datadog-agent/comp/core/config"
- genericstore "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/generic_store"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/subscriber"
+ genericstore "github.com/DataDog/datadog-agent/comp/core/tagger/generic_store"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/subscriber"
"github.com/DataDog/datadog-agent/comp/core/tagger/telemetry"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
)
diff --git a/comp/core/tagger/taggerimpl/remote/tagstore_test.go b/comp/core/tagger/impl-remote/tagstore_test.go
similarity index 99%
rename from comp/core/tagger/taggerimpl/remote/tagstore_test.go
rename to comp/core/tagger/impl-remote/tagstore_test.go
index c00abbde5a5a0..975127852483f 100644
--- a/comp/core/tagger/taggerimpl/remote/tagstore_test.go
+++ b/comp/core/tagger/impl-remote/tagstore_test.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-package remote
+package remotetaggerimpl
import (
"testing"
diff --git a/comp/core/tagger/taggerimpl/local/fake_tagger.go b/comp/core/tagger/impl/fake_tagger.go
similarity index 81%
rename from comp/core/tagger/taggerimpl/local/fake_tagger.go
rename to comp/core/tagger/impl/fake_tagger.go
index 19404d821a7c7..ed2973b8a9513 100644
--- a/comp/core/tagger/taggerimpl/local/fake_tagger.go
+++ b/comp/core/tagger/impl/fake_tagger.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-package local
+package taggerimpl
import (
"context"
@@ -11,26 +11,25 @@ import (
"sync"
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/empty"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/tagstore"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/tagstore"
"github.com/DataDog/datadog-agent/comp/core/tagger/telemetry"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
+
+ taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types"
"github.com/DataDog/datadog-agent/pkg/tagset"
)
-// FakeTagger implements the Tagger interface
+// FakeTagger is a fake implementation of the tagger interface
type FakeTagger struct {
errors map[string]error
store *tagstore.TagStore
telemetryStore *telemetry.Store
sync.RWMutex
- empty.Tagger
}
-// NewFakeTagger returns a new fake Tagger
-func NewFakeTagger(cfg config.Component, telemetryStore *telemetry.Store) *FakeTagger {
+func newFakeTagger(cfg config.Component, telemetryStore *telemetry.Store) *FakeTagger {
return &FakeTagger{
errors: make(map[string]error),
store: tagstore.NewTagStore(cfg, telemetryStore),
@@ -38,8 +37,6 @@ func NewFakeTagger(cfg config.Component, telemetryStore *telemetry.Store) *FakeT
}
}
-// FakeTagger specific interface
-
// SetTags allows to set tags in store for a given source, entity
func (f *FakeTagger) SetTags(entityID types.EntityID, source string, low, orch, high, std []string) {
f.store.ProcessTagInfo([]*types.TagInfo{
@@ -162,3 +159,32 @@ func (f *FakeTagger) Subscribe(subscriptionID string, filter *types.Filter) (typ
func (f *FakeTagger) getKey(entity types.EntityID, cardinality types.TagCardinality) string {
return entity.String() + strconv.FormatInt(int64(cardinality), 10)
}
+
+// GetEntityHash noop
+func (f *FakeTagger) GetEntityHash(types.EntityID, types.TagCardinality) string {
+ return ""
+}
+
+// AgentTags noop
+func (f *FakeTagger) AgentTags(types.TagCardinality) ([]string, error) {
+ return []string{}, nil
+}
+
+// SetNewCaptureTagger noop
+func (f *FakeTagger) SetNewCaptureTagger(tagger.Component) {}
+
+// ResetCaptureTagger noop
+func (f *FakeTagger) ResetCaptureTagger() {}
+
+// EnrichTags noop
+func (f *FakeTagger) EnrichTags(tagset.TagsAccumulator, taggertypes.OriginInfo) {}
+
+// ChecksCardinality noop
+func (f *FakeTagger) ChecksCardinality() types.TagCardinality {
+ return types.LowCardinality
+}
+
+// DogstatsdCardinality noop
+func (f *FakeTagger) DogstatsdCardinality() types.TagCardinality {
+ return types.LowCardinality
+}
diff --git a/comp/core/tagger/taggerimpl/local/tagger.go b/comp/core/tagger/impl/local_tagger.go
similarity index 63%
rename from comp/core/tagger/taggerimpl/local/tagger.go
rename to comp/core/tagger/impl/local_tagger.go
index e9193da317c42..81f5b51b1a1c8 100644
--- a/comp/core/tagger/taggerimpl/local/tagger.go
+++ b/comp/core/tagger/impl/local_tagger.go
@@ -3,8 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-// Package local implements a local Tagger.
-package local
+package taggerimpl
import (
"context"
@@ -12,14 +11,14 @@ import (
"sync"
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/collectors"
taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/collectors"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/empty"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/tagstore"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/tagstore"
"github.com/DataDog/datadog-agent/comp/core/tagger/telemetry"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
+ taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types"
"github.com/DataDog/datadog-agent/pkg/tagset"
)
@@ -27,7 +26,7 @@ import (
// memory store, and handles the query logic. One should use the package
// methods in comp/core/tagger to use the default Tagger instead of instantiating it
// directly.
-type Tagger struct {
+type localTagger struct {
sync.RWMutex
tagStore *tagstore.TagStore
@@ -38,24 +37,19 @@ type Tagger struct {
ctx context.Context
cancel context.CancelFunc
telemetryStore *telemetry.Store
- empty.Tagger
}
-// NewTagger returns an allocated tagger. You are probably looking for
-// tagger.Tag() using the global instance instead of creating your own.
-func NewTagger(cfg config.Component, workloadStore workloadmeta.Component, telemetryStore *telemetry.Store) *Tagger {
- return &Tagger{
+func newLocalTagger(cfg config.Component, wmeta workloadmeta.Component, telemetryStore *telemetry.Store) (tagger.Component, error) {
+ return &localTagger{
tagStore: tagstore.NewTagStore(cfg, telemetryStore),
- workloadStore: workloadStore,
+ workloadStore: wmeta,
telemetryStore: telemetryStore,
cfg: cfg,
- }
+ }, nil
}
-var _ tagger.Component = NewTagger(nil, nil, nil)
-
// Start starts the workloadmeta collector and then it is ready for requests.
-func (t *Tagger) Start(ctx context.Context) error {
+func (t *localTagger) Start(ctx context.Context) error {
t.ctx, t.cancel = context.WithCancel(ctx)
t.collector = collectors.NewWorkloadMetaCollector(
@@ -72,13 +66,13 @@ func (t *Tagger) Start(ctx context.Context) error {
}
// Stop queues a shutdown of Tagger
-func (t *Tagger) Stop() error {
+func (t *localTagger) Stop() error {
t.cancel()
return nil
}
// getTags returns a read only list of tags for a given entity.
-func (t *Tagger) getTags(entityID types.EntityID, cardinality types.TagCardinality) (tagset.HashedTags, error) {
+func (t *localTagger) getTags(entityID types.EntityID, cardinality types.TagCardinality) (tagset.HashedTags, error) {
if entityID.Empty() {
t.telemetryStore.QueriesByCardinality(cardinality).EmptyEntityID.Inc()
return tagset.HashedTags{}, fmt.Errorf("empty entity ID")
@@ -91,14 +85,14 @@ func (t *Tagger) getTags(entityID types.EntityID, cardinality types.TagCardinali
}
// AccumulateTagsFor appends tags for a given entity from the tagger to the TagsAccumulator
-func (t *Tagger) AccumulateTagsFor(entityID types.EntityID, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error {
+func (t *localTagger) AccumulateTagsFor(entityID types.EntityID, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error {
tags, err := t.getTags(entityID, cardinality)
tb.AppendHashed(tags)
return err
}
// Tag returns a copy of the tags for a given entity
-func (t *Tagger) Tag(entityID types.EntityID, cardinality types.TagCardinality) ([]string, error) {
+func (t *localTagger) Tag(entityID types.EntityID, cardinality types.TagCardinality) ([]string, error) {
tags, err := t.getTags(entityID, cardinality)
if err != nil {
return nil, err
@@ -110,7 +104,7 @@ func (t *Tagger) Tag(entityID types.EntityID, cardinality types.TagCardinality)
// If possible, avoid using this function, and use the Tag method instead.
// This function exists in order not to break backward compatibility with rtloader and python
// integrations using the tagger
-func (t *Tagger) LegacyTag(entity string, cardinality types.TagCardinality) ([]string, error) {
+func (t *localTagger) LegacyTag(entity string, cardinality types.TagCardinality) ([]string, error) {
prefix, id, err := taggercommon.ExtractPrefixAndID(entity)
if err != nil {
return nil, err
@@ -122,7 +116,7 @@ func (t *Tagger) LegacyTag(entity string, cardinality types.TagCardinality) ([]s
// Standard returns standard tags for a given entity
// It triggers a tagger fetch if the no tags are found
-func (t *Tagger) Standard(entityID types.EntityID) ([]string, error) {
+func (t *localTagger) Standard(entityID types.EntityID) ([]string, error) {
if entityID.Empty() {
return nil, fmt.Errorf("empty entity ID")
}
@@ -131,29 +125,55 @@ func (t *Tagger) Standard(entityID types.EntityID) ([]string, error) {
}
// GetEntity returns the entity corresponding to the specified id and an error
-func (t *Tagger) GetEntity(entityID types.EntityID) (*types.Entity, error) {
+func (t *localTagger) GetEntity(entityID types.EntityID) (*types.Entity, error) {
return t.tagStore.GetEntity(entityID)
}
// List the content of the tagger
-func (t *Tagger) List() types.TaggerListResponse {
+func (t *localTagger) List() types.TaggerListResponse {
return t.tagStore.List()
}
// Subscribe returns a channel that receives a slice of events whenever an entity is
// added, modified or deleted. It can send an initial burst of events only to the new
// subscriber, without notifying all of the others.
-func (t *Tagger) Subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) {
+func (t *localTagger) Subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) {
return t.tagStore.Subscribe(subscriptionID, filter)
}
// ReplayTagger returns the replay tagger instance
// This is a no-op for the local tagger
-func (t *Tagger) ReplayTagger() tagger.ReplayTagger {
+func (t *localTagger) ReplayTagger() tagger.ReplayTagger {
return nil
}
// GetTaggerTelemetryStore returns tagger telemetry store
-func (t *Tagger) GetTaggerTelemetryStore() *telemetry.Store {
+func (t *localTagger) GetTaggerTelemetryStore() *telemetry.Store {
return t.telemetryStore
}
+
+func (t *localTagger) GetEntityHash(types.EntityID, types.TagCardinality) string {
+ return ""
+}
+
+func (t *localTagger) AgentTags(types.TagCardinality) ([]string, error) {
+ return []string{}, nil
+}
+
+func (t *localTagger) GlobalTags(types.TagCardinality) ([]string, error) {
+ return []string{}, nil
+}
+
+func (t *localTagger) SetNewCaptureTagger(tagger.Component) {}
+
+func (t *localTagger) ResetCaptureTagger() {}
+
+func (t *localTagger) EnrichTags(tagset.TagsAccumulator, taggertypes.OriginInfo) {}
+
+func (t *localTagger) ChecksCardinality() types.TagCardinality {
+ return types.LowCardinality
+}
+
+func (t *localTagger) DogstatsdCardinality() types.TagCardinality {
+ return types.LowCardinality
+}
diff --git a/comp/core/tagger/taggerimpl/local/tagger_test.go b/comp/core/tagger/impl/local_tagger_test.go
similarity index 79%
rename from comp/core/tagger/taggerimpl/local/tagger_test.go
rename to comp/core/tagger/impl/local_tagger_test.go
index 99fddf0879859..e587407860e01 100644
--- a/comp/core/tagger/taggerimpl/local/tagger_test.go
+++ b/comp/core/tagger/impl/local_tagger_test.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-package local
+package taggerimpl
import (
"context"
@@ -21,7 +21,6 @@ import (
"github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
- workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock"
configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
"github.com/DataDog/datadog-agent/pkg/tagset"
"github.com/DataDog/datadog-agent/pkg/util/fxutil"
@@ -30,7 +29,7 @@ import (
func TestAccumulateTagsFor(t *testing.T) {
entityID := types.NewEntityID("", "entity_name")
- store := fxutil.Test[workloadmetamock.Mock](t, fx.Options(
+ store := fxutil.Test[workloadmeta.Component](t, fx.Options(
fx.Supply(config.Params{}),
fx.Supply(log.Params{}),
fx.Provide(func() log.Component { return logmock.New(t) }),
@@ -41,11 +40,13 @@ func TestAccumulateTagsFor(t *testing.T) {
tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule())
telemetryStore := taggerTelemetry.NewStore(tel)
cfg := configmock.New(t)
- tagger := NewTagger(cfg, store, telemetryStore)
- tagger.Start(context.Background())
+ tagger, err := newLocalTagger(cfg, store, telemetryStore)
+ assert.NoError(t, err)
+ localTagger := tagger.(*localTagger)
+ localTagger.Start(context.Background())
defer tagger.Stop()
- tagger.tagStore.ProcessTagInfo([]*types.TagInfo{
+ localTagger.tagStore.ProcessTagInfo([]*types.TagInfo{
{
EntityID: entityID,
Source: "stream",
@@ -60,7 +61,7 @@ func TestAccumulateTagsFor(t *testing.T) {
})
tb := tagset.NewHashlessTagsAccumulator()
- err := tagger.AccumulateTagsFor(entityID, types.HighCardinality, tb)
+ err = localTagger.AccumulateTagsFor(entityID, types.HighCardinality, tb)
assert.NoError(t, err)
assert.ElementsMatch(t, []string{"high", "low1", "low2"}, tb.Get())
}
@@ -68,7 +69,7 @@ func TestAccumulateTagsFor(t *testing.T) {
func TestTag(t *testing.T) {
entityID := types.NewEntityID(types.ContainerID, "123")
- store := fxutil.Test[workloadmetamock.Mock](t, fx.Options(
+ store := fxutil.Test[workloadmeta.Component](t, fx.Options(
fx.Supply(config.Params{}),
fx.Supply(log.Params{}),
fx.Provide(func() log.Component { return logmock.New(t) }),
@@ -79,9 +80,11 @@ func TestTag(t *testing.T) {
tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule())
telemetryStore := taggerTelemetry.NewStore(tel)
cfg := configmock.New(t)
- tagger := NewTagger(cfg, store, telemetryStore)
+ tagger, err := newLocalTagger(cfg, store, telemetryStore)
+ assert.NoError(t, err)
+ localTagger := tagger.(*localTagger)
- tagger.tagStore.ProcessTagInfo([]*types.TagInfo{
+ localTagger.tagStore.ProcessTagInfo([]*types.TagInfo{
{
EntityID: entityID,
Source: "stream",
@@ -98,15 +101,15 @@ func TestTag(t *testing.T) {
},
})
- lowCardTags, err := tagger.Tag(entityID, types.LowCardinality)
+ lowCardTags, err := localTagger.Tag(entityID, types.LowCardinality)
assert.NoError(t, err)
assert.ElementsMatch(t, []string{"low1", "low2"}, lowCardTags)
- orchestratorCardTags, err := tagger.Tag(entityID, types.OrchestratorCardinality)
+ orchestratorCardTags, err := localTagger.Tag(entityID, types.OrchestratorCardinality)
assert.NoError(t, err)
assert.ElementsMatch(t, []string{"low1", "low2", "orchestrator1", "orchestrator2"}, orchestratorCardTags)
- highCardTags, err := tagger.Tag(entityID, types.HighCardinality)
+ highCardTags, err := localTagger.Tag(entityID, types.HighCardinality)
assert.NoError(t, err)
assert.ElementsMatch(t, []string{"low1", "low2", "orchestrator1", "orchestrator2", "high1", "high2"}, highCardTags)
}
diff --git a/comp/core/tagger/taggerimpl/replay/tagger.go b/comp/core/tagger/impl/replay_tagger.go
similarity index 63%
rename from comp/core/tagger/taggerimpl/replay/tagger.go
rename to comp/core/tagger/impl/replay_tagger.go
index c13ded9685009..90d026446e503 100644
--- a/comp/core/tagger/taggerimpl/replay/tagger.go
+++ b/comp/core/tagger/impl/replay_tagger.go
@@ -3,8 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-// Package replay implements the Tagger replay.
-package replay
+package taggerimpl
import (
"context"
@@ -12,18 +11,17 @@ import (
"time"
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/empty"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/tagstore"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/tagstore"
"github.com/DataDog/datadog-agent/comp/core/tagger/telemetry"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
+ taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types"
"github.com/DataDog/datadog-agent/pkg/tagset"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
-// Tagger stores tags to entity as stored in a replay state.
-type Tagger struct {
+type replayTagger struct {
store *tagstore.TagStore
ctx context.Context
@@ -31,13 +29,10 @@ type Tagger struct {
telemetryTicker *time.Ticker
telemetryStore *telemetry.Store
- empty.Tagger
}
-// NewTagger returns an allocated tagger. You still have to run Init()
-// once the config package is ready.
-func NewTagger(cfg config.Component, telemetryStore *telemetry.Store) *Tagger {
- return &Tagger{
+func newReplayTagger(cfg config.Component, telemetryStore *telemetry.Store) tagger.ReplayTagger {
+ return &replayTagger{
store: tagstore.NewTagStore(cfg, telemetryStore),
telemetryStore: telemetryStore,
}
@@ -45,7 +40,7 @@ func NewTagger(cfg config.Component, telemetryStore *telemetry.Store) *Tagger {
// Start starts the connection to the replay tagger and starts watching for
// events.
-func (t *Tagger) Start(ctx context.Context) error {
+func (t *replayTagger) Start(ctx context.Context) error {
t.telemetryTicker = time.NewTicker(1 * time.Minute)
t.ctx, t.cancel = context.WithCancel(ctx)
@@ -54,7 +49,7 @@ func (t *Tagger) Start(ctx context.Context) error {
}
// Stop closes the connection to the replay tagger and stops event collection.
-func (t *Tagger) Stop() error {
+func (t *replayTagger) Stop() error {
t.cancel()
t.telemetryTicker.Stop()
@@ -65,7 +60,7 @@ func (t *Tagger) Stop() error {
}
// Tag returns tags for a given entity at the desired cardinality.
-func (t *Tagger) Tag(entityID types.EntityID, cardinality types.TagCardinality) ([]string, error) {
+func (t *replayTagger) Tag(entityID types.EntityID, cardinality types.TagCardinality) ([]string, error) {
tags := t.store.Lookup(entityID, cardinality)
return tags, nil
}
@@ -74,7 +69,7 @@ func (t *Tagger) Tag(entityID types.EntityID, cardinality types.TagCardinality)
// If possible, avoid using this function, and use the Tag method instead.
// This function exists in order not to break backward compatibility with rtloader and python
// integrations using the tagger
-func (t *Tagger) LegacyTag(entity string, cardinality types.TagCardinality) ([]string, error) {
+func (t *replayTagger) LegacyTag(entity string, cardinality types.TagCardinality) ([]string, error) {
prefix, id, err := taggercommon.ExtractPrefixAndID(entity)
if err != nil {
return nil, err
@@ -85,7 +80,7 @@ func (t *Tagger) LegacyTag(entity string, cardinality types.TagCardinality) ([]s
}
// AccumulateTagsFor returns tags for a given entity at the desired cardinality.
-func (t *Tagger) AccumulateTagsFor(entityID types.EntityID, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error {
+func (t *replayTagger) AccumulateTagsFor(entityID types.EntityID, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error {
tags := t.store.LookupHashed(entityID, cardinality)
if tags.Len() == 0 {
@@ -100,7 +95,7 @@ func (t *Tagger) AccumulateTagsFor(entityID types.EntityID, cardinality types.Ta
}
// Standard returns the standard tags for a given entity.
-func (t *Tagger) Standard(entityID types.EntityID) ([]string, error) {
+func (t *replayTagger) Standard(entityID types.EntityID) ([]string, error) {
tags, err := t.store.LookupStandard(entityID)
if err != nil {
return []string{}, err
@@ -110,28 +105,28 @@ func (t *Tagger) Standard(entityID types.EntityID) ([]string, error) {
}
// List returns all the entities currently stored by the tagger.
-func (t *Tagger) List() types.TaggerListResponse {
+func (t *replayTagger) List() types.TaggerListResponse {
return t.store.List()
}
// Subscribe does nothing in the replay tagger this tagger does not respond to events.
-func (t *Tagger) Subscribe(_ string, _ *types.Filter) (types.Subscription, error) {
+func (t *replayTagger) Subscribe(_ string, _ *types.Filter) (types.Subscription, error) {
// NOP
return nil, fmt.Errorf("not implemented")
}
// ReplayTagger returns the replay tagger instance
-func (t *Tagger) ReplayTagger() tagger.ReplayTagger {
- return t
+func (t *replayTagger) ReplayTagger() tagger.ReplayTagger {
+ return nil
}
// GetTaggerTelemetryStore returns tagger telemetry store
-func (t *Tagger) GetTaggerTelemetryStore() *telemetry.Store {
+func (t *replayTagger) GetTaggerTelemetryStore() *telemetry.Store {
return t.telemetryStore
}
// LoadState loads the state for the tagger from the supplied map.
-func (t *Tagger) LoadState(state []types.Entity) {
+func (t *replayTagger) LoadState(state []types.Entity) {
if state == nil {
return
}
@@ -152,6 +147,32 @@ func (t *Tagger) LoadState(state []types.Entity) {
}
// GetEntity returns the entity corresponding to the specified id and an error
-func (t *Tagger) GetEntity(entityID types.EntityID) (*types.Entity, error) {
+func (t *replayTagger) GetEntity(entityID types.EntityID) (*types.Entity, error) {
return t.store.GetEntity(entityID)
}
+
+func (t *replayTagger) GetEntityHash(types.EntityID, types.TagCardinality) string {
+ return ""
+}
+
+func (t *replayTagger) AgentTags(types.TagCardinality) ([]string, error) {
+ return []string{}, nil
+}
+
+func (t *replayTagger) GlobalTags(types.TagCardinality) ([]string, error) {
+ return []string{}, nil
+}
+
+func (t *replayTagger) SetNewCaptureTagger(tagger.Component) {}
+
+func (t *replayTagger) ResetCaptureTagger() {}
+
+func (t *replayTagger) EnrichTags(tagset.TagsAccumulator, taggertypes.OriginInfo) {}
+
+func (t *replayTagger) ChecksCardinality() types.TagCardinality {
+ return types.LowCardinality
+}
+
+func (t *replayTagger) DogstatsdCardinality() types.TagCardinality {
+ return types.LowCardinality
+}
diff --git a/comp/core/tagger/taggerimpl/tagger.go b/comp/core/tagger/impl/tagger.go
similarity index 70%
rename from comp/core/tagger/taggerimpl/tagger.go
rename to comp/core/tagger/impl/tagger.go
index 37a84f40bfbbc..1712ddaa77383 100644
--- a/comp/core/tagger/taggerimpl/tagger.go
+++ b/comp/core/tagger/impl/tagger.go
@@ -3,6 +3,12 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
+// The Tagger is the central source of truth for client-side entity tagging.
+// It subscribes to workloadmeta to get updates for all the entity kinds
+// (containers, kubernetes pods, kubernetes nodes, etc.) and extracts the tags for each of them.
+// Tags are then stored in memory (by the TagStore) and can be queried by the tagger.Tag()
+// method.
+
// Package taggerimpl contains the implementation of the tagger component.
package taggerimpl
@@ -19,27 +25,22 @@ import (
api "github.com/DataDog/datadog-agent/comp/api/api/def"
"github.com/DataDog/datadog-agent/comp/core/config"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
- taggerComp "github.com/DataDog/datadog-agent/comp/core/tagger"
taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/local"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/remote"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/replay"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/telemetry"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/core/tagger/utils"
coretelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
+ compdef "github.com/DataDog/datadog-agent/comp/def"
"github.com/DataDog/datadog-agent/comp/dogstatsd/packets"
taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types"
"github.com/DataDog/datadog-agent/pkg/tagset"
"github.com/DataDog/datadog-agent/pkg/util/common"
"github.com/DataDog/datadog-agent/pkg/util/containers/metrics"
"github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider"
- "github.com/DataDog/datadog-agent/pkg/util/fxutil"
httputils "github.com/DataDog/datadog-agent/pkg/util/http"
"github.com/DataDog/datadog-agent/pkg/util/optional"
-
- "go.uber.org/fx"
)
const (
@@ -57,50 +58,46 @@ type externalData struct {
podUID string
}
-type dependencies struct {
- fx.In
+// Requires defines the dependencies of the tagger component.
+type Requires struct {
+ compdef.In
- Lc fx.Lifecycle
+ Lc compdef.Lifecycle
Config config.Component
Log log.Component
Wmeta workloadmeta.Component
- Params taggerComp.Params
Telemetry coretelemetry.Component
+ Params tagger.Params
}
-type provides struct {
- fx.Out
+// Provides contains the fields provided by the tagger constructor.
+type Provides struct {
+ compdef.Out
- Comp taggerComp.Component
+ Comp tagger.Component
Endpoint api.AgentEndpointProvider
}
-// Module defines the fx options for this component.
-func Module() fxutil.Module {
- return fxutil.Component(
- fx.Provide(
- newTaggerClient,
- ))
-}
-
+// datadogConfig contains the configuration specific to Dogstatsd.
type datadogConfig struct {
+ // dogstatsdEntityIDPrecedenceEnabled disable enriching Dogstatsd metrics with tags from "origin detection" when Entity-ID is set.
dogstatsdEntityIDPrecedenceEnabled bool
- dogstatsdOptOutEnabled bool
- originDetectionUnifiedEnabled bool
+ // dogstatsdOptOutEnabled If enabled, and cardinality is none no origin detection is performed.
+ dogstatsdOptOutEnabled bool
+ // originDetectionUnifiedEnabled If enabled, all origin detection mechanisms will be unified to use the same logic.
+ originDetectionUnifiedEnabled bool
}
-// TaggerClient is a component that contains two tagger component: capturetagger and defaulttagger
-//
-// nolint:revive // TODO(containers) Fix revive linter
-type TaggerClient struct {
+// TaggerWrapper is a struct that contains two tagger component: capturetagger and the local tagger
+// and implements the tagger interface
+type TaggerWrapper struct {
// captureTagger is a tagger instance that contains a tagger that will contain the tagger
// state when replaying a capture scenario
- captureTagger taggerComp.Component
+ captureTagger tagger.Component
mux sync.RWMutex
- // defaultTagger is the shared tagger instance backing the global Tag and Init functions
- defaultTagger taggerComp.Component
+ defaultTagger tagger.Component
wmeta workloadmeta.Component
cfg config.Component
@@ -114,97 +111,80 @@ type TaggerClient struct {
log log.Component
}
-func createTaggerClient(defaultTagger taggerComp.Component, l log.Component) *TaggerClient {
- return &TaggerClient{
- defaultTagger: defaultTagger,
- log: l,
- }
-}
-
-// newTaggerClient returns a Component based on provided params, once it is provided,
-// fx will cache the component which is effectively a singleton instance, cached by fx.
-// it should be deprecated and removed
-func newTaggerClient(deps dependencies) provides {
- var taggerClient *TaggerClient
- telemetryStore := telemetry.NewStore(deps.Telemetry)
-
- switch deps.Params.AgentTypeForTagger {
- case taggerComp.CLCRunnerRemoteTaggerAgent:
- options, err := remote.CLCRunnerOptions(deps.Config)
-
- if err != nil {
- deps.Log.Errorf("unable to deps.Configure the remote tagger: %s", err)
- taggerClient = createTaggerClient(local.NewFakeTagger(deps.Config, telemetryStore), deps.Log)
- } else if options.Disabled {
- deps.Log.Errorf("remote tagger is disabled in clc runner.")
- taggerClient = createTaggerClient(local.NewFakeTagger(deps.Config, telemetryStore), deps.Log)
- } else {
- filter := types.NewFilterBuilder().Exclude(types.KubernetesPodUID).Build(types.HighCardinality)
- taggerClient = createTaggerClient(remote.NewTagger(options, deps.Config, telemetryStore, filter), deps.Log)
- }
- case taggerComp.NodeRemoteTaggerAgent:
- options, _ := remote.NodeAgentOptions(deps.Config)
- taggerClient = createTaggerClient(remote.NewTagger(options, deps.Config, telemetryStore, types.NewMatchAllFilter()), deps.Log)
- case taggerComp.LocalTaggerAgent:
- taggerClient = createTaggerClient(local.NewTagger(deps.Config, deps.Wmeta, telemetryStore), deps.Log)
- case taggerComp.FakeTagger:
- // all binaries are expected to provide their own tagger at startup. we
- // provide a fake tagger for testing purposes, as calling the global
- // tagger without proper initialization is very common there.
- taggerClient = createTaggerClient(local.NewFakeTagger(deps.Config, telemetryStore), deps.Log)
- }
+// NewComponent returns a new tagger client
+func NewComponent(req Requires) (Provides, error) {
+ taggerClient, err := NewTaggerClient(req.Params, req.Config, req.Wmeta, req.Log, req.Telemetry)
- if taggerClient != nil {
- taggerClient.wmeta = deps.Wmeta
+ if err != nil {
+ return Provides{}, err
}
- taggerClient.datadogConfig.dogstatsdEntityIDPrecedenceEnabled = deps.Config.GetBool("dogstatsd_entity_id_precedence")
- taggerClient.datadogConfig.originDetectionUnifiedEnabled = deps.Config.GetBool("origin_detection_unified")
- taggerClient.datadogConfig.dogstatsdOptOutEnabled = deps.Config.GetBool("dogstatsd_origin_optout_enabled")
- // we use to pull tagger metrics in dogstatsd. Pulling it later in the
- // pipeline improve memory allocation. We kept the old name to be
- // backward compatible and because origin detection only affect
- // dogstatsd metrics.
- taggerClient.tlmUDPOriginDetectionError = deps.Telemetry.NewCounter("dogstatsd", "udp_origin_detection_error", nil, "Dogstatsd UDP origin detection error count")
- taggerClient.telemetryStore = telemetryStore
-
- deps.Log.Info("TaggerClient is created, defaultTagger type: ", reflect.TypeOf(taggerClient.defaultTagger))
- deps.Lc.Append(fx.Hook{OnStart: func(_ context.Context) error {
- var err error
- checkCard := deps.Config.GetString("checks_tag_cardinality")
- dsdCard := deps.Config.GetString("dogstatsd_tag_cardinality")
- taggerClient.checksCardinality, err = types.StringToTagCardinality(checkCard)
- if err != nil {
- deps.Log.Warnf("failed to parse check tag cardinality, defaulting to low. Error: %s", err)
- taggerClient.checksCardinality = types.LowCardinality
- }
+ taggerClient.wmeta = req.Wmeta
- taggerClient.dogstatsdCardinality, err = types.StringToTagCardinality(dsdCard)
- if err != nil {
- deps.Log.Warnf("failed to parse dogstatsd tag cardinality, defaulting to low. Error: %s", err)
- taggerClient.dogstatsdCardinality = types.LowCardinality
- }
+ req.Log.Info("TaggerClient is created, defaultTagger type: ", reflect.TypeOf(taggerClient.defaultTagger))
+ req.Lc.Append(compdef.Hook{OnStart: func(_ context.Context) error {
// Main context passed to components, consistent with the one used in the workloadmeta component
mainCtx, _ := common.GetMainCtxCancel()
- err = taggerClient.Start(mainCtx)
- if err != nil && deps.Params.FallBackToLocalIfRemoteTaggerFails {
- deps.Log.Warnf("Starting remote tagger failed. Falling back to local tagger: %s", err)
- taggerClient.defaultTagger = local.NewTagger(deps.Config, deps.Wmeta, telemetryStore)
- // Retry to start the local tagger
- return taggerClient.Start(mainCtx)
- }
- return err
+ return taggerClient.Start(mainCtx)
}})
- deps.Lc.Append(fx.Hook{OnStop: func(context.Context) error {
+ req.Lc.Append(compdef.Hook{OnStop: func(context.Context) error {
return taggerClient.Stop()
}})
- return provides{
+
+ return Provides{
Comp: taggerClient,
Endpoint: api.NewAgentEndpointProvider(taggerClient.writeList, "/tagger-list", "GET"),
+ }, nil
+}
+
+// NewTaggerClient returns a new tagger client
+func NewTaggerClient(params tagger.Params, cfg config.Component, wmeta workloadmeta.Component, log log.Component, telemetryComp coretelemetry.Component) (*TaggerWrapper, error) {
+ var defaultTagger tagger.Component
+ var err error
+ telemetryStore := telemetry.NewStore(telemetryComp)
+ if params.UseFakeTagger {
+ defaultTagger = newFakeTagger(cfg, telemetryStore)
+ } else {
+ defaultTagger, err = newLocalTagger(cfg, wmeta, telemetryStore)
}
+
+ if err != nil {
+ return nil, err
+ }
+
+ wrapper := &TaggerWrapper{
+ defaultTagger: defaultTagger,
+ log: log,
+ telemetryStore: telemetryStore,
+ }
+
+ checkCard := cfg.GetString("checks_tag_cardinality")
+ dsdCard := cfg.GetString("dogstatsd_tag_cardinality")
+ wrapper.checksCardinality, err = types.StringToTagCardinality(checkCard)
+ if err != nil {
+ log.Warnf("failed to parse check tag cardinality, defaulting to low. Error: %s", err)
+ wrapper.checksCardinality = types.LowCardinality
+ }
+
+ wrapper.dogstatsdCardinality, err = types.StringToTagCardinality(dsdCard)
+ if err != nil {
+ log.Warnf("failed to parse dogstatsd tag cardinality, defaulting to low. Error: %s", err)
+ wrapper.dogstatsdCardinality = types.LowCardinality
+ }
+
+ wrapper.datadogConfig.dogstatsdEntityIDPrecedenceEnabled = cfg.GetBool("dogstatsd_entity_id_precedence")
+ wrapper.datadogConfig.originDetectionUnifiedEnabled = cfg.GetBool("origin_detection_unified")
+ wrapper.datadogConfig.dogstatsdOptOutEnabled = cfg.GetBool("dogstatsd_origin_optout_enabled")
+	// we used to pull tagger metrics in dogstatsd. Pulling them later in the
+	// pipeline improves memory allocation. We kept the old name to be
+	// backward compatible and because origin detection only affects
+	// dogstatsd metrics.
+ wrapper.tlmUDPOriginDetectionError = telemetryComp.NewCounter("dogstatsd", "udp_origin_detection_error", nil, "Dogstatsd UDP origin detection error count")
+
+ return wrapper, nil
}
-func (t *TaggerClient) writeList(w http.ResponseWriter, _ *http.Request) {
+func (t *TaggerWrapper) writeList(w http.ResponseWriter, _ *http.Request) {
response := t.List()
jsonTags, err := json.Marshal(response)
@@ -216,32 +196,32 @@ func (t *TaggerClient) writeList(w http.ResponseWriter, _ *http.Request) {
}
// Start calls defaultTagger.Start
-func (t *TaggerClient) Start(ctx context.Context) error {
+func (t *TaggerWrapper) Start(ctx context.Context) error {
return t.defaultTagger.Start(ctx)
}
// Stop calls defaultTagger.Stop
-func (t *TaggerClient) Stop() error {
+func (t *TaggerWrapper) Stop() error {
return t.defaultTagger.Stop()
}
// ReplayTagger returns the replay tagger instance
-func (t *TaggerClient) ReplayTagger() taggerComp.ReplayTagger {
- return replay.NewTagger(t.cfg, t.telemetryStore)
+func (t *TaggerWrapper) ReplayTagger() tagger.ReplayTagger {
+ return newReplayTagger(t.cfg, t.telemetryStore)
}
// GetTaggerTelemetryStore returns tagger telemetry store
-func (t *TaggerClient) GetTaggerTelemetryStore() *telemetry.Store {
+func (t *TaggerWrapper) GetTaggerTelemetryStore() *telemetry.Store {
return t.telemetryStore
}
// GetDefaultTagger returns the default Tagger in current instance
-func (t *TaggerClient) GetDefaultTagger() taggerComp.Component {
+func (t *TaggerWrapper) GetDefaultTagger() tagger.Component {
return t.defaultTagger
}
// GetEntity returns the hash for the provided entity id.
-func (t *TaggerClient) GetEntity(entityID types.EntityID) (*types.Entity, error) {
+func (t *TaggerWrapper) GetEntity(entityID types.EntityID) (*types.Entity, error) {
t.mux.RLock()
if t.captureTagger != nil {
entity, err := t.captureTagger.GetEntity(entityID)
@@ -258,7 +238,7 @@ func (t *TaggerClient) GetEntity(entityID types.EntityID) (*types.Entity, error)
// Tag queries the captureTagger (for replay scenarios) or the defaultTagger.
// It can return tags at high cardinality (with tags about individual containers),
// or at orchestrator cardinality (pod/task level).
-func (t *TaggerClient) Tag(entityID types.EntityID, cardinality types.TagCardinality) ([]string, error) {
+func (t *TaggerWrapper) Tag(entityID types.EntityID, cardinality types.TagCardinality) ([]string, error) {
// TODO: defer unlock once performance overhead of defer is negligible
t.mux.RLock()
if t.captureTagger != nil {
@@ -276,7 +256,7 @@ func (t *TaggerClient) Tag(entityID types.EntityID, cardinality types.TagCardina
// If possible, avoid using this function, and use the Tag method instead.
// This function exists in order not to break backward compatibility with rtloader and python
// integrations using the tagger
-func (t *TaggerClient) LegacyTag(entity string, cardinality types.TagCardinality) ([]string, error) {
+func (t *TaggerWrapper) LegacyTag(entity string, cardinality types.TagCardinality) ([]string, error) {
prefix, id, err := taggercommon.ExtractPrefixAndID(entity)
if err != nil {
return nil, err
@@ -290,7 +270,7 @@ func (t *TaggerClient) LegacyTag(entity string, cardinality types.TagCardinality
// sources and appends them to the TagsAccumulator. It can return tags at high
// cardinality (with tags about individual containers), or at orchestrator
// cardinality (pod/task level).
-func (t *TaggerClient) AccumulateTagsFor(entityID types.EntityID, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error {
+func (t *TaggerWrapper) AccumulateTagsFor(entityID types.EntityID, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error {
// TODO: defer unlock once performance overhead of defer is negligible
t.mux.RLock()
if t.captureTagger != nil {
@@ -306,7 +286,7 @@ func (t *TaggerClient) AccumulateTagsFor(entityID types.EntityID, cardinality ty
// GetEntityHash returns the hash for the tags associated with the given entity
// Returns an empty string if the tags lookup fails
-func (t *TaggerClient) GetEntityHash(entityID types.EntityID, cardinality types.TagCardinality) string {
+func (t *TaggerWrapper) GetEntityHash(entityID types.EntityID, cardinality types.TagCardinality) string {
tags, err := t.Tag(entityID, cardinality)
if err != nil {
return ""
@@ -316,7 +296,7 @@ func (t *TaggerClient) GetEntityHash(entityID types.EntityID, cardinality types.
// Standard queries the defaultTagger to get entity
// standard tags (env, version, service) from cache or sources.
-func (t *TaggerClient) Standard(entityID types.EntityID) ([]string, error) {
+func (t *TaggerWrapper) Standard(entityID types.EntityID) ([]string, error) {
t.mux.RLock()
// TODO(components) (tagger): captureTagger is a legacy global variable to be eliminated
if t.captureTagger != nil {
@@ -332,7 +312,7 @@ func (t *TaggerClient) Standard(entityID types.EntityID) ([]string, error) {
// AgentTags returns the agent tags
// It relies on the container provider utils to get the Agent container ID
-func (t *TaggerClient) AgentTags(cardinality types.TagCardinality) ([]string, error) {
+func (t *TaggerWrapper) AgentTags(cardinality types.TagCardinality) ([]string, error) {
ctrID, err := metrics.GetProvider(optional.NewOption(t.wmeta)).GetMetaCollector().GetSelfContainerID()
if err != nil {
return nil, err
@@ -348,7 +328,7 @@ func (t *TaggerClient) AgentTags(cardinality types.TagCardinality) ([]string, er
// GlobalTags queries global tags that should apply to all data coming from the
// agent.
-func (t *TaggerClient) GlobalTags(cardinality types.TagCardinality) ([]string, error) {
+func (t *TaggerWrapper) GlobalTags(cardinality types.TagCardinality) ([]string, error) {
t.mux.RLock()
if t.captureTagger != nil {
tags, err := t.captureTagger.Tag(taggercommon.GetGlobalEntityID(), cardinality)
@@ -363,7 +343,7 @@ func (t *TaggerClient) GlobalTags(cardinality types.TagCardinality) ([]string, e
// globalTagBuilder queries global tags that should apply to all data coming
// from the agent and appends them to the TagsAccumulator
-func (t *TaggerClient) globalTagBuilder(cardinality types.TagCardinality, tb tagset.TagsAccumulator) error {
+func (t *TaggerWrapper) globalTagBuilder(cardinality types.TagCardinality, tb tagset.TagsAccumulator) error {
t.mux.RLock()
if t.captureTagger != nil {
err := t.captureTagger.AccumulateTagsFor(taggercommon.GetGlobalEntityID(), cardinality, tb)
@@ -378,19 +358,19 @@ func (t *TaggerClient) globalTagBuilder(cardinality types.TagCardinality, tb tag
}
// List the content of the defaulTagger
-func (t *TaggerClient) List() types.TaggerListResponse {
+func (t *TaggerWrapper) List() types.TaggerListResponse {
return t.defaultTagger.List()
}
// SetNewCaptureTagger sets the tagger to be used when replaying a capture
-func (t *TaggerClient) SetNewCaptureTagger(newCaptureTagger taggerComp.Component) {
+func (t *TaggerWrapper) SetNewCaptureTagger(newCaptureTagger tagger.Component) {
t.mux.Lock()
defer t.mux.Unlock()
t.captureTagger = newCaptureTagger
}
// ResetCaptureTagger resets the capture tagger to nil
-func (t *TaggerClient) ResetCaptureTagger() {
+func (t *TaggerWrapper) ResetCaptureTagger() {
t.mux.Lock()
defer t.mux.Unlock()
t.captureTagger = nil
@@ -400,7 +380,10 @@ func (t *TaggerClient) ResetCaptureTagger() {
// NOTE(remy): it is not needed to sort/dedup the tags anymore since after the
// enrichment, the metric and its tags is sent to the context key generator, which
// is taking care of deduping the tags while generating the context key.
-func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggertypes.OriginInfo) {
+// This function is duplicated in the remote tagger `impl-remote`.
+// When modifying this function make sure to update the copy `impl-remote` as well.
+// TODO: extract this function to a shared function so it can be used in both implementations
+func (t *TaggerWrapper) EnrichTags(tb tagset.TagsAccumulator, originInfo taggertypes.OriginInfo) {
cardinality := taggerCardinality(originInfo.Cardinality, t.dogstatsdCardinality, t.log)
productOrigin := originInfo.ProductOrigin
@@ -548,19 +531,19 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty
}
// generateContainerIDFromExternalData generates a container ID from the external data
-func (t *TaggerClient) generateContainerIDFromExternalData(e externalData, metricsProvider provider.ContainerIDForPodUIDAndContNameRetriever) (string, error) {
+func (t *TaggerWrapper) generateContainerIDFromExternalData(e externalData, metricsProvider provider.ContainerIDForPodUIDAndContNameRetriever) (string, error) {
return metricsProvider.ContainerIDForPodUIDAndContName(e.podUID, e.containerName, e.init, time.Second)
}
// ChecksCardinality defines the cardinality of tags we should send for check metrics
// this can still be overridden when calling get_tags in python checks.
-func (t *TaggerClient) ChecksCardinality() types.TagCardinality {
+func (t *TaggerWrapper) ChecksCardinality() types.TagCardinality {
return t.checksCardinality
}
// DogstatsdCardinality defines the cardinality of tags we should send for metrics from
// dogstatsd.
-func (t *TaggerClient) DogstatsdCardinality() types.TagCardinality {
+func (t *TaggerWrapper) DogstatsdCardinality() types.TagCardinality {
return t.dogstatsdCardinality
}
@@ -583,6 +566,6 @@ func taggerCardinality(cardinality string,
}
// Subscribe calls defaultTagger.Subscribe
-func (t *TaggerClient) Subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) {
+func (t *TaggerWrapper) Subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) {
return t.defaultTagger.Subscribe(subscriptionID, filter)
}
diff --git a/comp/core/tagger/taggerimpl/tagger_test.go b/comp/core/tagger/impl/tagger_test.go
similarity index 73%
rename from comp/core/tagger/taggerimpl/tagger_test.go
rename to comp/core/tagger/impl/tagger_test.go
index a62f68403ed8c..74f37dc1a5ae6 100644
--- a/comp/core/tagger/taggerimpl/tagger_test.go
+++ b/comp/core/tagger/impl/tagger_test.go
@@ -11,11 +11,16 @@ import (
"time"
"github.com/stretchr/testify/assert"
+ "go.uber.org/fx"
+ "github.com/DataDog/datadog-agent/comp/core/config"
+ log "github.com/DataDog/datadog-agent/comp/core/log/def"
logmock "github.com/DataDog/datadog-agent/comp/core/log/mock"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
+ noopTelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
+ workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types"
"github.com/DataDog/datadog-agent/pkg/tagset"
@@ -49,7 +54,21 @@ func setupFakeMetricsProvider(mockMetricsProvider metrics.Provider) func() {
func TestEnrichTags(t *testing.T) {
// Create fake tagger
- fakeTagger := fxutil.Test[tagger.Mock](t, MockModule())
+ c := configmock.New(t)
+ params := tagger.Params{
+ UseFakeTagger: true,
+ }
+ logComponent := logmock.New(t)
+ wmeta := fxutil.Test[workloadmeta.Component](t,
+ fx.Provide(func() log.Component { return logComponent }),
+ fx.Provide(func() config.Component { return c }),
+ workloadmetafxmock.MockModule(workloadmeta.NewParams()),
+ )
+
+ tagger, err := NewTaggerClient(params, c, wmeta, logComponent, noopTelemetry.GetCompatComponent())
+ assert.NoError(t, err)
+ fakeTagger := tagger.defaultTagger.(*FakeTagger)
+
containerName, initContainerName, containerID, initContainerID, podUID := "container-name", "init-container-name", "container-id", "init-container-id", "pod-uid"
// Fill fake tagger with entities
@@ -96,7 +115,7 @@ func TestEnrichTags(t *testing.T) {
} {
t.Run(tt.name, func(t *testing.T) {
tb := tagset.NewHashingTagsAccumulator()
- fakeTagger.EnrichTags(tb, tt.originInfo)
+ tagger.EnrichTags(tb, tt.originInfo)
assert.Equal(t, tt.expectedTags, tb.Get())
})
}
@@ -150,30 +169,59 @@ func TestEnrichTags(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
tt.setup()
tb := tagset.NewHashingTagsAccumulator()
- fakeTagger.EnrichTags(tb, tt.originInfo)
+ tagger.EnrichTags(tb, tt.originInfo)
assert.Equal(t, tt.expectedTags, tb.Get())
})
}
}
func TestEnrichTagsOrchestrator(t *testing.T) {
- fakeTagger := fxutil.Test[tagger.Mock](t, MockModule())
+ // Create fake tagger
+ c := configmock.New(t)
+ params := tagger.Params{
+ UseFakeTagger: true,
+ }
+ logComponent := logmock.New(t)
+ wmeta := fxutil.Test[workloadmeta.Component](t,
+ fx.Provide(func() log.Component { return logComponent }),
+ fx.Provide(func() config.Component { return c }),
+ workloadmetafxmock.MockModule(workloadmeta.NewParams()),
+ )
+
+ tagger, err := NewTaggerClient(params, c, wmeta, logComponent, noopTelemetry.GetCompatComponent())
+ assert.NoError(t, err)
+
+ fakeTagger := tagger.defaultTagger.(*FakeTagger)
+
fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "bar"), "fooSource", []string{"container-low"}, []string{"container-orch"}, nil, nil)
tb := tagset.NewHashingTagsAccumulator()
- fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "container_id://bar", Cardinality: "orchestrator"})
+ tagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "container_id://bar", Cardinality: "orchestrator"})
assert.Equal(t, []string{"container-low", "container-orch"}, tb.Get())
}
func TestEnrichTagsOptOut(t *testing.T) {
- fakeTagger := fxutil.Test[tagger.Mock](t, MockModule())
+ // Create fake tagger
+ c := configmock.New(t)
+ c.SetWithoutSource("dogstatsd_origin_optout_enabled", true)
+ params := tagger.Params{
+ UseFakeTagger: true,
+ }
+ logComponent := logmock.New(t)
+ wmeta := fxutil.Test[workloadmeta.Component](t,
+ fx.Provide(func() log.Component { return logComponent }),
+ fx.Provide(func() config.Component { return c }),
+ workloadmetafxmock.MockModule(workloadmeta.NewParams()),
+ )
+
+ tagger, err := NewTaggerClient(params, c, wmeta, logComponent, noopTelemetry.GetCompatComponent())
+ assert.NoError(t, err)
+ fakeTagger := tagger.defaultTagger.(*FakeTagger)
- cfg := configmock.New(t)
- cfg.SetWithoutSource("dogstatsd_origin_optout_enabled", true)
fakeTagger.SetTags(types.NewEntityID(types.EntityIDPrefix("foo"), "bar"), "fooSource", []string{"container-low"}, []string{"container-orch"}, nil, nil)
tb := tagset.NewHashingTagsAccumulator()
- fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "foo://bar", PodUID: "pod-uid", ContainerID: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD})
+ tagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "foo://bar", PodUID: "pod-uid", ContainerID: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD})
assert.Equal(t, []string{}, tb.Get())
- fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "foo://bar", ContainerID: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD})
+ tagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "foo://bar", ContainerID: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD})
assert.Equal(t, []string{}, tb.Get())
}
@@ -237,7 +285,7 @@ func TestGenerateContainerIDFromExternalData(t *testing.T) {
},
} {
t.Run(tt.name, func(t *testing.T) {
- fakeTagger := TaggerClient{}
+ fakeTagger := TaggerWrapper{}
containerID, err := fakeTagger.generateContainerIDFromExternalData(tt.externalData, tt.cidProvider)
assert.NoError(t, err)
assert.Equal(t, tt.expected, containerID)
@@ -246,7 +294,20 @@ func TestGenerateContainerIDFromExternalData(t *testing.T) {
}
func TestAgentTags(t *testing.T) {
- fakeTagger := fxutil.Test[tagger.Mock](t, MockModule())
+ c := configmock.New(t)
+ params := tagger.Params{
+ UseFakeTagger: true,
+ }
+ logComponent := logmock.New(t)
+ wmeta := fxutil.Test[workloadmeta.Component](t,
+ fx.Provide(func() log.Component { return logComponent }),
+ fx.Provide(func() config.Component { return c }),
+ workloadmetafxmock.MockModule(workloadmeta.NewParams()),
+ )
+
+ tagger, err := NewTaggerClient(params, c, wmeta, logComponent, noopTelemetry.GetCompatComponent())
+ assert.NoError(t, err)
+ fakeTagger := tagger.defaultTagger.(*FakeTagger)
agentContainerID, podUID := "agentContainerID", "podUID"
mockMetricsProvider := collectormock.NewMetricsProvider()
@@ -259,34 +320,47 @@ func TestAgentTags(t *testing.T) {
// Expect metrics provider to return an empty container ID so no tags can be found
containerMetaCollector := collectormock.MetaCollector{ContainerID: ""}
mockMetricsProvider.RegisterMetaCollector(&containerMetaCollector)
- tagList, err := fakeTagger.AgentTags(types.OrchestratorCardinality)
+ tagList, err := tagger.AgentTags(types.OrchestratorCardinality)
assert.Nil(t, err)
assert.Nil(t, tagList)
// Expect metrics provider to return the agent container ID so tags can be found
containerMetaCollector = collectormock.MetaCollector{ContainerID: agentContainerID}
mockMetricsProvider.RegisterMetaCollector(&containerMetaCollector)
- tagList, err = fakeTagger.AgentTags(types.OrchestratorCardinality)
+ tagList, err = tagger.AgentTags(types.OrchestratorCardinality)
assert.NoError(t, err)
assert.Equal(t, []string{"container-low", "container-orch"}, tagList)
}
func TestGlobalTags(t *testing.T) {
- fakeTagger := fxutil.Test[tagger.Mock](t, MockModule())
+ c := configmock.New(t)
+ params := tagger.Params{
+ UseFakeTagger: true,
+ }
+ logComponent := logmock.New(t)
+ wmeta := fxutil.Test[workloadmeta.Component](t,
+ fx.Provide(func() log.Component { return logComponent }),
+ fx.Provide(func() config.Component { return c }),
+ workloadmetafxmock.MockModule(workloadmeta.NewParams()),
+ )
+
+ tagger, err := NewTaggerClient(params, c, wmeta, logComponent, noopTelemetry.GetCompatComponent())
+ assert.NoError(t, err)
+ fakeTagger := tagger.defaultTagger.(*FakeTagger)
fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "bar"), "fooSource", []string{"container-low"}, []string{"container-orch"}, []string{"container-high"}, nil)
fakeTagger.SetGlobalTags([]string{"global-low"}, []string{"global-orch"}, []string{"global-high"}, nil)
- globalTags, err := fakeTagger.GlobalTags(types.OrchestratorCardinality)
+ globalTags, err := tagger.GlobalTags(types.OrchestratorCardinality)
assert.Nil(t, err)
assert.Equal(t, []string{"global-low", "global-orch"}, globalTags)
tb := tagset.NewHashingTagsAccumulator()
- fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "container_id://bar", Cardinality: "orchestrator"})
+ tagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "container_id://bar", Cardinality: "orchestrator"})
assert.Equal(t, []string{"container-low", "container-orch", "global-low", "global-orch"}, tb.Get())
}
func TestTaggerCardinality(t *testing.T) {
- fakeTagger := TaggerClient{}
+ fakeTagger := TaggerWrapper{}
tests := []struct {
name string
cardinality string
@@ -332,18 +406,18 @@ func TestTaggerCardinality(t *testing.T) {
}
func TestDefaultCardinality(t *testing.T) {
- cfg := configmock.New(t)
+
for _, tt := range []struct {
name string
wantChecksCardinality types.TagCardinality
wantDogstatsdCardinality types.TagCardinality
- setup func()
+ setup func(cfg config.Component)
}{
{
name: "successful parse config values, use config",
wantChecksCardinality: types.HighCardinality,
wantDogstatsdCardinality: types.OrchestratorCardinality,
- setup: func() {
+ setup: func(cfg config.Component) {
cfg.SetWithoutSource("checks_tag_cardinality", types.HighCardinalityString)
cfg.SetWithoutSource("dogstatsd_tag_cardinality", types.OrchestratorCardinalityString)
},
@@ -352,17 +426,31 @@ func TestDefaultCardinality(t *testing.T) {
name: "fail parse config values, use default",
wantChecksCardinality: types.LowCardinality,
wantDogstatsdCardinality: types.LowCardinality,
- setup: func() {
+ setup: func(cfg config.Component) {
cfg.SetWithoutSource("checks_tag_cardinality", "foo")
cfg.SetWithoutSource("dogstatsd_tag_cardinality", "foo")
},
},
} {
t.Run(tt.name, func(t *testing.T) {
- tt.setup()
- fakeTagger := fxutil.Test[tagger.Mock](t, MockModule())
- assert.Equal(t, tt.wantDogstatsdCardinality, fakeTagger.DogstatsdCardinality())
- assert.Equal(t, tt.wantChecksCardinality, fakeTagger.ChecksCardinality())
+ cfg := configmock.New(t)
+ tt.setup(cfg)
+
+ params := tagger.Params{
+ UseFakeTagger: true,
+ }
+ logComponent := logmock.New(t)
+ wmeta := fxutil.Test[workloadmeta.Component](t,
+ fx.Provide(func() log.Component { return logComponent }),
+ fx.Provide(func() config.Component { return cfg }),
+ workloadmetafxmock.MockModule(workloadmeta.NewParams()),
+ )
+
+ tagger, err := NewTaggerClient(params, cfg, wmeta, logComponent, noopTelemetry.GetCompatComponent())
+ assert.NoError(t, err)
+
+ assert.Equal(t, tt.wantDogstatsdCardinality, tagger.DogstatsdCardinality())
+ assert.Equal(t, tt.wantChecksCardinality, tagger.ChecksCardinality())
})
}
}
diff --git a/comp/core/tagger/mock/mock.go b/comp/core/tagger/mock/mock.go
new file mode 100644
index 0000000000000..30fcabd7ef0b8
--- /dev/null
+++ b/comp/core/tagger/mock/mock.go
@@ -0,0 +1,148 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+// +build test
+
+// Package mock contains the implementation of the mock for the tagger component.
+package mock
+
+import (
+ "net/http"
+ "testing"
+
+ "go.uber.org/fx"
+
+ "github.com/stretchr/testify/assert"
+
+ api "github.com/DataDog/datadog-agent/comp/api/api/def"
+ "github.com/DataDog/datadog-agent/comp/core/config"
+ log "github.com/DataDog/datadog-agent/comp/core/log/def"
+ logmock "github.com/DataDog/datadog-agent/comp/core/log/mock"
+ "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ taggerimpl "github.com/DataDog/datadog-agent/comp/core/tagger/impl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/types"
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ noopTelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl"
+ "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
+ workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
+ workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx"
+ configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+)
+
+// Mock implements mock-specific methods for the tagger component.
+type Mock interface {
+ tagger.Component
+
+ // SetTags allows to set tags in the mock fake tagger
+ SetTags(entityID types.EntityID, source string, low, orch, high, std []string)
+
+ // SetGlobalTags allows to set tags in store for the global entity
+ SetGlobalTags(low, orch, high, std []string)
+}
+
+// mockTaggerClient is a mock of the tagger Component
+type mockTaggerClient struct {
+ *taggerimpl.TaggerWrapper
+}
+
+// mockHandleRequest is a simple mocked http.Handler function to test the route is registered correctly on the api component
+func (m *mockTaggerClient) mockHandleRequest(w http.ResponseWriter, _ *http.Request) {
+ w.Write([]byte("OK"))
+}
+
+// New returns a Mock
+func New(t testing.TB) Mock {
+ c := configmock.New(t)
+ params := tagger.Params{
+ UseFakeTagger: true,
+ }
+ logComponent := logmock.New(t)
+ wmeta := fxutil.Test[workloadmeta.Component](t,
+ fx.Provide(func() log.Component { return logComponent }),
+ fx.Provide(func() config.Component { return c }),
+ workloadmetafx.Module(workloadmeta.NewParams()),
+ )
+
+ tagger, err := taggerimpl.NewTaggerClient(params, c, wmeta, logComponent, noopTelemetry.GetCompatComponent())
+
+ assert.NoError(t, err)
+
+ return &mockTaggerClient{
+ tagger,
+ }
+}
+
+// Provides is a struct containing the mock and the endpoint
+type Provides struct {
+ fx.Out
+
+ Comp Mock
+ Endpoint api.AgentEndpointProvider
+}
+
+type dependencies struct {
+ fx.In
+
+ Config config.Component
+ Log log.Component
+ WMeta workloadmeta.Component
+ Telemetry telemetry.Component
+}
+
+// NewMock returns a Provides
+func NewMock(deps dependencies) (Provides, error) {
+ params := tagger.Params{
+ UseFakeTagger: true,
+ }
+
+ tagger, err := taggerimpl.NewTaggerClient(params, deps.Config, deps.WMeta, deps.Log, deps.Telemetry)
+ if err != nil {
+ return Provides{}, err
+ }
+
+ c := &mockTaggerClient{
+ tagger,
+ }
+ return Provides{
+ Comp: c,
+ Endpoint: api.NewAgentEndpointProvider(c.mockHandleRequest, "/tagger-list", "GET"),
+ }, nil
+}
+
+// Module is a module containing the mock, useful for testing
+func Module() fxutil.Module {
+ return fxutil.Component(
+ fx.Provide(NewMock),
+ fx.Supply(config.Params{}),
+ fx.Supply(log.Params{}),
+ fx.Provide(func(t testing.TB) log.Component { return logmock.New(t) }),
+ config.MockModule(),
+ sysprobeconfigimpl.MockModule(),
+ workloadmetafx.Module(workloadmeta.NewParams()),
+ telemetryimpl.MockModule(),
+ )
+}
+
+// SetupFakeTagger calls fxutil.Test to create a mock tagger for testing
+func SetupFakeTagger(t testing.TB) Mock {
+ return fxutil.Test[Mock](t, Module())
+}
+
+// SetTags calls faketagger SetTags which sets the tags for an entity
+func (m *mockTaggerClient) SetTags(entity types.EntityID, source string, low, orch, high, std []string) {
+ if v, ok := m.TaggerWrapper.GetDefaultTagger().(*taggerimpl.FakeTagger); ok {
+ v.SetTags(entity, source, low, orch, high, std)
+ }
+}
+
+// SetGlobalTags calls faketagger SetGlobalTags which sets the tags for the global entity
+func (m *mockTaggerClient) SetGlobalTags(low, orch, high, std []string) {
+ if v, ok := m.TaggerWrapper.GetDefaultTagger().(*taggerimpl.FakeTagger); ok {
+ v.SetGlobalTags(low, orch, high, std)
+ }
+}
diff --git a/comp/core/tagger/params.go b/comp/core/tagger/params.go
deleted file mode 100644
index 6b1bead6bf503..0000000000000
--- a/comp/core/tagger/params.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2023-present Datadog, Inc.
-
-package tagger
-
-import (
- "github.com/DataDog/datadog-agent/comp/core/config"
- pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
-)
-
-// AgentTypeForTagger represents agent types that tagger is used for
-type AgentTypeForTagger uint8
-
-// Define agent type for tagger
-const (
- LocalTaggerAgent AgentTypeForTagger = 1 << iota
- NodeRemoteTaggerAgent
- CLCRunnerRemoteTaggerAgent
- FakeTagger
-)
-
-// Params provides the kind of agent we're instantiating workloadmeta for
-type Params struct {
- AgentTypeForTagger AgentTypeForTagger
- FallBackToLocalIfRemoteTaggerFails bool
-}
-
-// NewTaggerParamsForCoreAgent is a constructor function for creating core agent tagger params
-func NewTaggerParamsForCoreAgent(_ config.Component) Params {
- if pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()) {
- return NewCLCRunnerRemoteTaggerParams()
- }
- return NewTaggerParams()
-}
-
-// NewTaggerParams creates a Params struct with the default LocalTagger type
-func NewTaggerParams() Params {
- return Params{AgentTypeForTagger: LocalTaggerAgent,
- FallBackToLocalIfRemoteTaggerFails: false}
-}
-
-// NewFakeTaggerParams creates a Params struct with the FakeTagger type and for testing purposes
-func NewFakeTaggerParams() Params {
- return Params{AgentTypeForTagger: FakeTagger,
- FallBackToLocalIfRemoteTaggerFails: false}
-}
-
-// NewNodeRemoteTaggerParams creates a Params struct with the NodeRemoteTagger type
-func NewNodeRemoteTaggerParams() Params {
- return Params{AgentTypeForTagger: NodeRemoteTaggerAgent,
- FallBackToLocalIfRemoteTaggerFails: false}
-}
-
-// NewNodeRemoteTaggerParamsWithFallback creates a Params struct with the NodeRemoteTagger type
-// and fallback to local tagger if remote tagger fails
-func NewNodeRemoteTaggerParamsWithFallback() Params {
- return Params{AgentTypeForTagger: NodeRemoteTaggerAgent,
- FallBackToLocalIfRemoteTaggerFails: true}
-}
-
-// NewCLCRunnerRemoteTaggerParams creates a Params struct with the CLCRunnerRemoteTagger type
-func NewCLCRunnerRemoteTaggerParams() Params {
- return Params{AgentTypeForTagger: CLCRunnerRemoteTaggerAgent,
- FallBackToLocalIfRemoteTaggerFails: false}
-}
diff --git a/comp/core/tagger/taggerimpl/server/server.go b/comp/core/tagger/server/server.go
similarity index 98%
rename from comp/core/tagger/taggerimpl/server/server.go
rename to comp/core/tagger/server/server.go
index ca51e24c4fc36..b0f9e7ff7a613 100644
--- a/comp/core/tagger/taggerimpl/server/server.go
+++ b/comp/core/tagger/server/server.go
@@ -14,7 +14,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/proto"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core"
diff --git a/comp/core/tagger/taggerimpl/server/util.go b/comp/core/tagger/server/util.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/server/util.go
rename to comp/core/tagger/server/util.go
diff --git a/comp/core/tagger/taggerimpl/server/util_test.go b/comp/core/tagger/server/util_test.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/server/util_test.go
rename to comp/core/tagger/server/util_test.go
diff --git a/comp/core/tagger/taggerimpl/subscriber/subscriber.go b/comp/core/tagger/subscriber/subscriber.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/subscriber/subscriber.go
rename to comp/core/tagger/subscriber/subscriber.go
diff --git a/comp/core/tagger/taggerimpl/subscriber/subscription_manager.go b/comp/core/tagger/subscriber/subscription_manager.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/subscriber/subscription_manager.go
rename to comp/core/tagger/subscriber/subscription_manager.go
diff --git a/comp/core/tagger/taggerimpl/subscriber/subscription_manager_test.go b/comp/core/tagger/subscriber/subscription_manager_test.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/subscriber/subscription_manager_test.go
rename to comp/core/tagger/subscriber/subscription_manager_test.go
diff --git a/comp/core/tagger/taggerimpl/empty/empty.go b/comp/core/tagger/taggerimpl/empty/empty.go
deleted file mode 100644
index 46386c3c45cc4..0000000000000
--- a/comp/core/tagger/taggerimpl/empty/empty.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016-present Datadog, Inc.
-
-// Package empty implements empty functions for the tagger component interface.
-package empty
-
-import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/types"
- taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types"
- "github.com/DataDog/datadog-agent/pkg/tagset"
-)
-
-// Tagger struct to embed in other taggers that do not implement some of the tagger component functions
-type Tagger struct{}
-
-// GetEntityHash returns the hash for the tags associated with the given entity
-// Returns an empty string if the tags lookup fails
-func (t *Tagger) GetEntityHash(types.EntityID, types.TagCardinality) string {
- return ""
-}
-
-// AgentTags returns the agent tags
-// It relies on the container provider utils to get the Agent container ID
-func (t *Tagger) AgentTags(types.TagCardinality) ([]string, error) {
- return []string{}, nil
-}
-
-// GlobalTags queries global tags that should apply to all data coming from the
-// agent.
-func (t *Tagger) GlobalTags(types.TagCardinality) ([]string, error) {
- return []string{}, nil
-}
-
-// SetNewCaptureTagger sets the tagger to be used when replaying a capture
-func (t *Tagger) SetNewCaptureTagger(tagger.Component) {}
-
-// ResetCaptureTagger resets the capture tagger to nil
-func (t *Tagger) ResetCaptureTagger() {}
-
-// EnrichTags extends a tag list with origin detection tags
-func (t *Tagger) EnrichTags(tagset.TagsAccumulator, taggertypes.OriginInfo) {}
-
-// ChecksCardinality defines the cardinality of tags we should send for check metrics
-func (t *Tagger) ChecksCardinality() types.TagCardinality {
- return types.LowCardinality
-}
-
-// DogstatsdCardinality defines the cardinality of tags we should send for metrics from
-// dogstatsd.
-func (t *Tagger) DogstatsdCardinality() types.TagCardinality {
- return types.LowCardinality
-}
diff --git a/comp/core/tagger/taggerimpl/tagger_mock.go b/comp/core/tagger/taggerimpl/tagger_mock.go
deleted file mode 100644
index 94fc806bd2bc5..0000000000000
--- a/comp/core/tagger/taggerimpl/tagger_mock.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016-present Datadog, Inc.
-
-//go:build test
-// +build test
-
-package taggerimpl
-
-import (
- "net/http"
- "testing"
-
- "go.uber.org/fx"
-
- api "github.com/DataDog/datadog-agent/comp/api/api/def"
- "github.com/DataDog/datadog-agent/comp/core/config"
- log "github.com/DataDog/datadog-agent/comp/core/log/def"
- logmock "github.com/DataDog/datadog-agent/comp/core/log/mock"
- "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/local"
- "github.com/DataDog/datadog-agent/comp/core/tagger/types"
- "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
- workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
- workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx"
- "github.com/DataDog/datadog-agent/pkg/util/fxutil"
-)
-
-// MockTaggerClient is a mock of the tagger Component
-type MockTaggerClient struct {
- *TaggerClient
-}
-
-// mockHandleRequest is a simple mocked http.Handler function to test the route is registered correctly on the api component
-func (m *MockTaggerClient) mockHandleRequest(w http.ResponseWriter, _ *http.Request) {
- w.Write([]byte("OK"))
-}
-
-// MockProvides is a mock of the tagger.Component provides struct to test endpoints register properly
-type MockProvides struct {
- fx.Out
-
- Comp tagger.Mock
- Endpoint api.AgentEndpointProvider
-}
-
-var _ tagger.Component = (*MockTaggerClient)(nil)
-
-// NewMock returns a MockTagger
-func NewMock(deps dependencies) MockProvides {
- taggerClient := newTaggerClient(deps).Comp
- c := &MockTaggerClient{
- TaggerClient: taggerClient.(*TaggerClient),
- }
- return MockProvides{
- Comp: c,
- Endpoint: api.NewAgentEndpointProvider(c.mockHandleRequest, "/tagger-list", "GET"),
- }
-}
-
-// MockModule is a module containing the mock, useful for testing
-func MockModule() fxutil.Module {
- return fxutil.Component(
- fx.Provide(NewMock),
- fx.Supply(config.Params{}),
- fx.Supply(log.Params{}),
- fx.Provide(func(t testing.TB) log.Component { return logmock.New(t) }),
- config.MockModule(),
- sysprobeconfigimpl.MockModule(),
- fx.Supply(tagger.NewFakeTaggerParams()),
- workloadmetafx.Module(workloadmeta.NewParams()),
- telemetryimpl.MockModule(),
- )
-}
-
-// SetTags calls faketagger SetTags which sets the tags for an entity
-func (m *MockTaggerClient) SetTags(entity types.EntityID, source string, low, orch, high, std []string) {
- if m.TaggerClient == nil {
- panic("Tagger must be initialized before calling SetTags")
- }
- if v, ok := m.TaggerClient.defaultTagger.(*local.FakeTagger); ok {
- v.SetTags(entity, source, low, orch, high, std)
- }
-}
-
-// SetGlobalTags calls faketagger SetGlobalTags which sets the tags for the global entity
-func (m *MockTaggerClient) SetGlobalTags(low, orch, high, std []string) {
- if m.TaggerClient == nil {
- panic("Tagger must be initialized before calling SetTags")
- }
- if v, ok := m.TaggerClient.defaultTagger.(*local.FakeTagger); ok {
- v.SetGlobalTags(low, orch, high, std)
- }
-}
-
-// SetupFakeTagger calls fxutil.Test to create a mock tagger for testing
-func SetupFakeTagger(t *testing.T) tagger.Mock {
- return fxutil.Test[tagger.Mock](t, MockModule())
-}
diff --git a/comp/core/tagger/tags/go.mod b/comp/core/tagger/tags/go.mod
new file mode 100644
index 0000000000000..c80c048c0f5e1
--- /dev/null
+++ b/comp/core/tagger/tags/go.mod
@@ -0,0 +1,3 @@
+module github.com/DataDog/datadog-agent/comp/core/tagger/tags
+
+go 1.22.0
diff --git a/comp/core/tagger/taggerimpl/tagstore/entity_tags.go b/comp/core/tagger/tagstore/entity_tags.go
similarity index 99%
rename from comp/core/tagger/taggerimpl/tagstore/entity_tags.go
rename to comp/core/tagger/tagstore/entity_tags.go
index a5584254fd12b..3488a8e345c16 100644
--- a/comp/core/tagger/taggerimpl/tagstore/entity_tags.go
+++ b/comp/core/tagger/tagstore/entity_tags.go
@@ -11,7 +11,7 @@ import (
"strings"
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/collectors"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/collectors"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/pkg/tagset"
"github.com/DataDog/datadog-agent/pkg/util/flavor"
diff --git a/comp/core/tagger/taggerimpl/tagstore/entity_tags_test.go b/comp/core/tagger/tagstore/entity_tags_test.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/tagstore/entity_tags_test.go
rename to comp/core/tagger/tagstore/entity_tags_test.go
diff --git a/comp/core/tagger/taggerimpl/tagstore/source_tags.go b/comp/core/tagger/tagstore/source_tags.go
similarity index 100%
rename from comp/core/tagger/taggerimpl/tagstore/source_tags.go
rename to comp/core/tagger/tagstore/source_tags.go
diff --git a/comp/core/tagger/taggerimpl/tagstore/tagstore.go b/comp/core/tagger/tagstore/tagstore.go
similarity index 98%
rename from comp/core/tagger/taggerimpl/tagstore/tagstore.go
rename to comp/core/tagger/tagstore/tagstore.go
index 1757983bedb6c..0551b007b17e9 100644
--- a/comp/core/tagger/taggerimpl/tagstore/tagstore.go
+++ b/comp/core/tagger/tagstore/tagstore.go
@@ -17,8 +17,8 @@ import (
"github.com/benbjohnson/clock"
"github.com/DataDog/datadog-agent/comp/core/config"
- genericstore "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/generic_store"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/subscriber"
+ genericstore "github.com/DataDog/datadog-agent/comp/core/tagger/generic_store"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/subscriber"
"github.com/DataDog/datadog-agent/comp/core/tagger/telemetry"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/pkg/status/health"
diff --git a/comp/core/tagger/taggerimpl/local/tagstore_bench_test.go b/comp/core/tagger/tagstore/tagstore_bench_test.go
similarity index 91%
rename from comp/core/tagger/taggerimpl/local/tagstore_bench_test.go
rename to comp/core/tagger/tagstore/tagstore_bench_test.go
index 913bd7c2ee761..bca1470b63e98 100644
--- a/comp/core/tagger/taggerimpl/local/tagstore_bench_test.go
+++ b/comp/core/tagger/tagstore/tagstore_bench_test.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-package local
+package tagstore
import (
"fmt"
@@ -13,7 +13,6 @@ import (
"testing"
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/tagstore"
taggerTelemetry "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/core/telemetry"
@@ -49,7 +48,7 @@ func init() {
func BenchmarkTagStoreThroughput(b *testing.B) {
tel := fxutil.Test[telemetry.Component](b, telemetryimpl.MockModule())
telemetryStore := taggerTelemetry.NewStore(tel)
- store := tagstore.NewTagStore(configmock.New(b), telemetryStore)
+ store := NewTagStore(configmock.New(b), telemetryStore)
doneCh := make(chan struct{})
pruneTicker := time.NewTicker(time.Second)
@@ -96,7 +95,7 @@ func BenchmarkTagStore_processTagInfo(b *testing.B) {
tel := fxutil.Test[telemetry.Component](b, telemetryimpl.MockModule())
telemetryStore := taggerTelemetry.NewStore(tel)
- store := tagstore.NewTagStore(configmock.New(b), telemetryStore)
+ store := NewTagStore(configmock.New(b), telemetryStore)
for i := 0; i < b.N; i++ {
processRandomTagInfoBatch(store)
@@ -126,7 +125,7 @@ func generateRandomTags() []string {
return tags
}
-func processRandomTagInfoBatch(store *tagstore.TagStore) {
+func processRandomTagInfoBatch(store *TagStore) {
tagInfos := make([]*types.TagInfo, 0, batchSize)
for i := 0; i < batchSize; i++ {
tagInfos = append(tagInfos, generateRandomTagInfo())
diff --git a/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go b/comp/core/tagger/tagstore/tagstore_test.go
similarity index 99%
rename from comp/core/tagger/taggerimpl/tagstore/tagstore_test.go
rename to comp/core/tagger/tagstore/tagstore_test.go
index 49b556d919afc..d797d770b0445 100644
--- a/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go
+++ b/comp/core/tagger/tagstore/tagstore_test.go
@@ -15,7 +15,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/collectors"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/collectors"
taggerTelemetry "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/core/telemetry"
diff --git a/comp/core/workloadmeta/collectors/internal/ecs/v4parser.go b/comp/core/workloadmeta/collectors/internal/ecs/v4parser.go
index a78fbe0a69eb0..9fe87042b9ac3 100644
--- a/comp/core/workloadmeta/collectors/internal/ecs/v4parser.go
+++ b/comp/core/workloadmeta/collectors/internal/ecs/v4parser.go
@@ -87,7 +87,12 @@ func (c *collector) getTaskWithTagsFromV4Endpoint(ctx context.Context, task v1.T
func(d time.Duration) time.Duration { return time.Duration(c.metadataRetryTimeoutFactor) * d }),
).GetTaskWithTags(ctx)
if err != nil {
- log.Warnf("failed to get task with tags from metadata v4 API: %s", err)
+ // If it's a timeout error, log it as debug to avoid spamming the logs as the data can be fetched in next run
+ if errors.Is(err, context.DeadlineExceeded) {
+ log.Debugf("timeout while getting task with tags from metadata v4 API: %s", err)
+ } else {
+ log.Warnf("failed to get task with tags from metadata v4 API: %s", err)
+ }
return v1TaskToV4Task(task), err
}
diff --git a/comp/dogstatsd/listeners/udp.go b/comp/dogstatsd/listeners/udp.go
index f7b71c49e87ee..98a914a56bebf 100644
--- a/comp/dogstatsd/listeners/udp.go
+++ b/comp/dogstatsd/listeners/udp.go
@@ -36,12 +36,18 @@ func init() {
udpExpvars.Set("Bytes", &udpBytes)
}
+type netUDPConn interface {
+ LocalAddr() net.Addr
+ ReadFrom(b []byte) (int, net.Addr, error)
+ Close() error
+}
+
// UDPListener implements the StatsdListener interface for UDP protocol.
// It listens to a given UDP address and sends back packets ready to be
// processed.
// Origin detection is not implemented for UDP.
type UDPListener struct {
- conn *net.UDPConn
+ conn netUDPConn
packetsBuffer *packets.Buffer
packetAssembler *packets.Assembler
buffer []byte
diff --git a/comp/dogstatsd/listeners/udp_integration_test.go b/comp/dogstatsd/listeners/udp_integration_test.go
new file mode 100644
index 0000000000000..3899b7a99ba4f
--- /dev/null
+++ b/comp/dogstatsd/listeners/udp_integration_test.go
@@ -0,0 +1,117 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+//go:build !windows
+
+package listeners
+
+import (
+ "fmt"
+ "net"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/DataDog/datadog-agent/comp/dogstatsd/packets"
+)
+
+func TestStartStopUDPListener(t *testing.T) {
+ port, err := getAvailableUDPPort()
+ require.NoError(t, err)
+ cfg := map[string]interface{}{}
+ cfg["dogstatsd_port"] = port
+ cfg["dogstatsd_non_local_traffic"] = false
+
+ deps := fulfillDepsWithConfig(t, cfg)
+ telemetryStore := NewTelemetryStore(nil, deps.Telemetry)
+ packetsTelemetryStore := packets.NewTelemetryStore(nil, deps.Telemetry)
+ s, err := NewUDPListener(nil, newPacketPoolManagerUDP(deps.Config, packetsTelemetryStore), deps.Config, nil, telemetryStore, packetsTelemetryStore)
+ require.NotNil(t, s)
+
+ assert.NoError(t, err)
+
+ s.Listen()
+ // Local port should be unavailable
+ address, _ := net.ResolveUDPAddr("udp", fmt.Sprintf("127.0.0.1:%d", port))
+ _, err = net.ListenUDP("udp", address)
+ assert.Error(t, err)
+
+ s.Stop()
+
+ // check that the port can be bound, try for 100 ms
+ for i := 0; i < 10; i++ {
+ var conn net.Conn
+ conn, err = net.ListenUDP("udp", address)
+ if err == nil {
+ conn.Close()
+ break
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+ require.NoError(t, err, "port is not available, it should be")
+}
+
+func TestUDPNonLocal(t *testing.T) {
+ port, err := getAvailableUDPPort()
+ require.NoError(t, err)
+
+ cfg := map[string]interface{}{}
+ cfg["dogstatsd_port"] = port
+ cfg["dogstatsd_non_local_traffic"] = true
+ deps := fulfillDepsWithConfig(t, cfg)
+ telemetryStore := NewTelemetryStore(nil, deps.Telemetry)
+ packetsTelemetryStore := packets.NewTelemetryStore(nil, deps.Telemetry)
+ s, err := NewUDPListener(nil, newPacketPoolManagerUDP(deps.Config, packetsTelemetryStore), deps.Config, nil, telemetryStore, packetsTelemetryStore)
+ assert.NoError(t, err)
+ require.NotNil(t, s)
+
+ s.Listen()
+ defer s.Stop()
+
+ // Local port should be unavailable
+ address, _ := net.ResolveUDPAddr("udp", fmt.Sprintf("127.0.0.1:%d", port))
+ _, err = net.ListenUDP("udp", address)
+ assert.Error(t, err)
+
+ // External port should be unavailable
+ externalPort := fmt.Sprintf("%s:%d", getLocalIP(), port)
+ address, _ = net.ResolveUDPAddr("udp", externalPort)
+ _, err = net.ListenUDP("udp", address)
+ assert.Error(t, err)
+}
+
+func TestUDPLocalOnly(t *testing.T) {
+ port, err := getAvailableUDPPort()
+ require.NoError(t, err)
+
+ fmt.Println("port: ", port)
+
+ cfg := map[string]interface{}{}
+ cfg["dogstatsd_port"] = port
+ cfg["dogstatsd_non_local_traffic"] = false
+ deps := fulfillDepsWithConfig(t, cfg)
+ telemetryStore := NewTelemetryStore(nil, deps.Telemetry)
+ packetsTelemetryStore := packets.NewTelemetryStore(nil, deps.Telemetry)
+ s, err := NewUDPListener(nil, newPacketPoolManagerUDP(deps.Config, packetsTelemetryStore), deps.Config, nil, telemetryStore, packetsTelemetryStore)
+ assert.NoError(t, err)
+ require.NotNil(t, s)
+
+ s.Listen()
+ defer s.Stop()
+
+ // Local port should be unavailable
+ address, _ := net.ResolveUDPAddr("udp", fmt.Sprintf("127.0.0.1:%d", port))
+ _, err = net.ListenUDP("udp", address)
+ assert.Error(t, err)
+
+ // External port should be available
+ externalPort := fmt.Sprintf("%s:%d", getLocalIP(), port)
+ address, _ = net.ResolveUDPAddr("udp", externalPort)
+ conn, err := net.ListenUDP("udp", address)
+ require.NotNil(t, conn)
+ assert.NoError(t, err)
+ conn.Close()
+}
diff --git a/comp/dogstatsd/listeners/udp_test.go b/comp/dogstatsd/listeners/udp_test.go
index 8d359aabe7355..329e1d6c28261 100644
--- a/comp/dogstatsd/listeners/udp_test.go
+++ b/comp/dogstatsd/listeners/udp_test.go
@@ -7,6 +7,7 @@
package listeners
import (
+ "errors"
"fmt"
"net"
"strconv"
@@ -73,23 +74,14 @@ func TestUDPListenerTelemetry(t *testing.T) {
packetsTelemetryStore := packets.NewTelemetryStore(nil, deps.Telemetry)
s, err := NewUDPListener(packetChannel, newPacketPoolManagerUDP(deps.Config, packetsTelemetryStore), deps.Config, nil, telemetryStore, packetsTelemetryStore)
require.NotNil(t, s)
-
assert.Nil(t, err)
+ mConn := defaultMConn(s.conn.LocalAddr(), []byte("hello world"))
+ s.conn.Close()
+ s.conn = mConn
s.Listen()
defer s.Stop()
- conn, err := net.Dial("udp", fmt.Sprintf("127.0.0.1:%d", port))
- require.Nil(t, err)
-
- defer func() {
- err := conn.Close()
- assert.Nil(t, err)
- }()
-
- _, err = conn.Write([]byte("hello world"))
- require.Nil(t, err)
-
select {
case pkts := <-packetChannel:
packet := pkts[0]
@@ -113,104 +105,6 @@ func TestUDPListenerTelemetry(t *testing.T) {
}
}
-func TestStartStopUDPListener(t *testing.T) {
- port, err := getAvailableUDPPort()
- require.Nil(t, err)
- cfg := map[string]interface{}{}
- cfg["dogstatsd_port"] = port
- cfg["dogstatsd_non_local_traffic"] = false
-
- deps := fulfillDepsWithConfig(t, cfg)
- telemetryStore := NewTelemetryStore(nil, deps.Telemetry)
- packetsTelemetryStore := packets.NewTelemetryStore(nil, deps.Telemetry)
- s, err := NewUDPListener(nil, newPacketPoolManagerUDP(deps.Config, packetsTelemetryStore), deps.Config, nil, telemetryStore, packetsTelemetryStore)
- require.NotNil(t, s)
-
- assert.Nil(t, err)
-
- s.Listen()
- // Local port should be unavailable
- address, _ := net.ResolveUDPAddr("udp", fmt.Sprintf("127.0.0.1:%d", port))
- _, err = net.ListenUDP("udp", address)
- assert.NotNil(t, err)
-
- s.Stop()
-
- // check that the port can be bound, try for 100 ms
- for i := 0; i < 10; i++ {
- var conn net.Conn
- conn, err = net.ListenUDP("udp", address)
- if err == nil {
- conn.Close()
- break
- }
- time.Sleep(10 * time.Millisecond)
- }
- require.NoError(t, err, "port is not available, it should be")
-}
-
-func TestUDPNonLocal(t *testing.T) {
- port, err := getAvailableUDPPort()
- require.Nil(t, err)
-
- cfg := map[string]interface{}{}
- cfg["dogstatsd_port"] = port
- cfg["dogstatsd_non_local_traffic"] = true
- deps := fulfillDepsWithConfig(t, cfg)
- telemetryStore := NewTelemetryStore(nil, deps.Telemetry)
- packetsTelemetryStore := packets.NewTelemetryStore(nil, deps.Telemetry)
- s, err := NewUDPListener(nil, newPacketPoolManagerUDP(deps.Config, packetsTelemetryStore), deps.Config, nil, telemetryStore, packetsTelemetryStore)
- assert.Nil(t, err)
- require.NotNil(t, s)
-
- s.Listen()
- defer s.Stop()
-
- // Local port should be unavailable
- address, _ := net.ResolveUDPAddr("udp", fmt.Sprintf("127.0.0.1:%d", port))
- _, err = net.ListenUDP("udp", address)
- assert.NotNil(t, err)
-
- // External port should be unavailable
- externalPort := fmt.Sprintf("%s:%d", getLocalIP(), port)
- address, _ = net.ResolveUDPAddr("udp", externalPort)
- _, err = net.ListenUDP("udp", address)
- assert.NotNil(t, err)
-}
-
-func TestUDPLocalOnly(t *testing.T) {
- port, err := getAvailableUDPPort()
- require.Nil(t, err)
-
- fmt.Println("port: ", port)
-
- cfg := map[string]interface{}{}
- cfg["dogstatsd_port"] = port
- cfg["dogstatsd_non_local_traffic"] = false
- deps := fulfillDepsWithConfig(t, cfg)
- telemetryStore := NewTelemetryStore(nil, deps.Telemetry)
- packetsTelemetryStore := packets.NewTelemetryStore(nil, deps.Telemetry)
- s, err := NewUDPListener(nil, newPacketPoolManagerUDP(deps.Config, packetsTelemetryStore), deps.Config, nil, telemetryStore, packetsTelemetryStore)
- assert.Nil(t, err)
- require.NotNil(t, s)
-
- s.Listen()
- defer s.Stop()
-
- // Local port should be unavailable
- address, _ := net.ResolveUDPAddr("udp", fmt.Sprintf("127.0.0.1:%d", port))
- _, err = net.ListenUDP("udp", address)
- assert.NotNil(t, err)
-
- // External port should be available
- externalPort := fmt.Sprintf("%s:%d", getLocalIP(), port)
- address, _ = net.ResolveUDPAddr("udp", externalPort)
- conn, err := net.ListenUDP("udp", address)
- require.NotNil(t, conn)
- assert.Nil(t, err)
- conn.Close()
-}
-
func TestUDPReceive(t *testing.T) {
var contents = []byte("daemon:666|g|#sometag1:somevalue1,sometag2:somevalue2")
port, err := getAvailableUDPPort()
@@ -226,13 +120,11 @@ func TestUDPReceive(t *testing.T) {
s, err := NewUDPListener(packetChannel, newPacketPoolManagerUDP(deps.Config, packetsTelemetryStore), deps.Config, nil, telemetryStore, packetsTelemetryStore)
require.Nil(t, err)
require.NotNil(t, s)
+ mockConn := defaultMConn(s.conn.LocalAddr(), contents)
+ s.conn.Close()
+ s.conn = mockConn
s.Listen()
defer s.Stop()
- conn, err := net.Dial("udp", fmt.Sprintf("127.0.0.1:%d", port))
- assert.Nil(t, err)
- require.NotNil(t, conn)
- defer conn.Close()
- conn.Write(contents)
select {
case pkts := <-packetChannel:
@@ -322,3 +214,32 @@ func getLocalIP() string {
}
return ""
}
+
+func defaultMConn(addr net.Addr, bs ...[]byte) *udpMock {
+ return &udpMock{bufferList: append([][]byte{}, bs...), address: addr}
+}
+
+type udpMock struct {
+ bufferList [][]byte
+ address net.Addr
+ nextMsg int
+}
+
+func (conn udpMock) LocalAddr() net.Addr {
+ return conn.address
+}
+
+func (conn *udpMock) ReadFrom(b []byte) (int, net.Addr, error) {
+ if conn.nextMsg == len(conn.bufferList) {
+ return 0, conn.address, errors.New("Attempted use of closed network connection")
+ }
+ buffer := conn.bufferList[conn.nextMsg]
+ conn.nextMsg++
+ n := copy(b, buffer)
+
+ return n, conn.address, nil
+}
+
+func (conn udpMock) Close() error {
+ return nil
+}
diff --git a/comp/dogstatsd/listeners/uds_common.go b/comp/dogstatsd/listeners/uds_common.go
index b8480c8ff2ef7..62a39421dcc12 100644
--- a/comp/dogstatsd/listeners/uds_common.go
+++ b/comp/dogstatsd/listeners/uds_common.go
@@ -16,6 +16,7 @@ import (
"strconv"
"strings"
"sync"
+ "syscall"
"time"
"github.com/DataDog/datadog-agent/comp/core/telemetry"
@@ -76,10 +77,26 @@ type UDSListener struct {
packetsTelemetryStore *packets.TelemetryStore
}
+// Wrapper for net.UnixConn
+type netUnixConn interface {
+ Close() error
+ LocalAddr() net.Addr
+ Read(b []byte) (int, error)
+ ReadFromUnix(b []byte) (int, *net.UnixAddr, error)
+ ReadMsgUnix(b []byte, oob []byte) (n int, oobn int, flags int, addr *net.UnixAddr, err error)
+ SyscallConn() (syscall.RawConn, error)
+ SetReadBuffer(bytes int) error
+ RemoteAddr() net.Addr
+ SetDeadline(t time.Time) error
+ SetReadDeadline(t time.Time) error
+ SetWriteDeadline(t time.Time) error
+ Write(b []byte) (n int, err error)
+}
+
// CloseFunction is a function that closes a connection
-type CloseFunction func(unixConn *net.UnixConn) error
+type CloseFunction func(unixConn netUnixConn) error
-func setupUnixConn(conn *net.UnixConn, originDetection bool, config model.Reader) (bool, error) {
+func setupUnixConn(conn netUnixConn, originDetection bool, config model.Reader) (bool, error) {
if originDetection {
err := enableUDSPassCred(conn)
if err != nil {
@@ -168,7 +185,7 @@ func NewUDSListener(packetOut chan packets.Packets, sharedPacketPoolManager *pac
}
// Listen runs the intake loop. Should be called in its own goroutine
-func (l *UDSListener) handleConnection(conn *net.UnixConn, closeFunc CloseFunction) error {
+func (l *UDSListener) handleConnection(conn netUnixConn, closeFunc CloseFunction) error {
listenerID := l.getListenerID(conn)
tlmListenerID := listenerID
telemetryWithFullListenerID := l.telemetryWithListenerID
@@ -360,7 +377,7 @@ func (l *UDSListener) handleConnection(conn *net.UnixConn, closeFunc CloseFuncti
}
}
-func (l *UDSListener) getConnID(conn *net.UnixConn) string {
+func (l *UDSListener) getConnID(conn netUnixConn) string {
// We use the file descriptor as a unique identifier for the connection. This might
// increase the cardinality in the backend, but this option is not designed to be enabled
// all the time. Plus is it useful to debug issues with the UDS listener since we will be
@@ -374,7 +391,7 @@ func (l *UDSListener) getConnID(conn *net.UnixConn) string {
}
return strconv.Itoa(int(fdConn))
}
-func (l *UDSListener) getListenerID(conn *net.UnixConn) string {
+func (l *UDSListener) getListenerID(conn netUnixConn) string {
listenerID := "uds-" + conn.LocalAddr().Network()
connID := l.getConnID(conn)
if connID != "" {
diff --git a/comp/dogstatsd/listeners/uds_common_test.go b/comp/dogstatsd/listeners/uds_common_test.go
index 3d74e49da8a70..699d5974b65ab 100644
--- a/comp/dogstatsd/listeners/uds_common_test.go
+++ b/comp/dogstatsd/listeners/uds_common_test.go
@@ -10,9 +10,13 @@
package listeners
import (
+ "errors"
+ "io"
"net"
"os"
+ "syscall"
"testing"
+ "time"
"golang.org/x/net/nettest"
@@ -49,7 +53,7 @@ func newPacketPoolManagerUDS(cfg config.Component, packetsTelemetryStore *packet
func testFileExistsNewUDSListener(t *testing.T, socketPath string, cfg map[string]interface{}, listenerFactory udsListenerFactory) {
_, err := os.Create(socketPath)
- assert.Nil(t, err)
+ assert.NoError(t, err)
defer os.Remove(socketPath)
deps := fulfillDepsWithConfig(t, cfg)
telemetryStore := NewTelemetryStore(nil, deps.Telemetry)
@@ -60,9 +64,9 @@ func testFileExistsNewUDSListener(t *testing.T, socketPath string, cfg map[strin
func testSocketExistsNewUSDListener(t *testing.T, socketPath string, cfg map[string]interface{}, listenerFactory udsListenerFactory) {
address, err := net.ResolveUnixAddr("unix", socketPath)
- assert.Nil(t, err)
+ assert.NoError(t, err)
_, err = net.ListenUnix("unix", address)
- assert.Nil(t, err)
+ assert.NoError(t, err)
testWorkingNewUDSListener(t, socketPath, cfg, listenerFactory)
}
@@ -73,10 +77,10 @@ func testWorkingNewUDSListener(t *testing.T, socketPath string, cfg map[string]i
s, err := listenerFactory(nil, newPacketPoolManagerUDS(deps.Config, packetsTelemetryStore), deps.Config, deps.PidMap, telemetryStore, packetsTelemetryStore, deps.Telemetry)
defer s.Stop()
- assert.Nil(t, err)
+ assert.NoError(t, err)
assert.NotNil(t, s)
fi, err := os.Stat(socketPath)
- require.Nil(t, err)
+ require.NoError(t, err)
assert.Equal(t, "Srwx-w--w-", fi.Mode().String())
}
@@ -108,17 +112,117 @@ func testStartStopUDSListener(t *testing.T, listenerFactory udsListenerFactory,
telemetryStore := NewTelemetryStore(nil, deps.Telemetry)
packetsTelemetryStore := packets.NewTelemetryStore(nil, deps.Telemetry)
s, err := listenerFactory(nil, newPacketPoolManagerUDS(deps.Config, packetsTelemetryStore), deps.Config, deps.PidMap, telemetryStore, packetsTelemetryStore, deps.Telemetry)
- assert.Nil(t, err)
+ assert.NoError(t, err)
assert.NotNil(t, s)
s.Listen()
conn, err := net.Dial(transport, socketPath)
- assert.Nil(t, err)
+ assert.NoError(t, err)
conn.Close()
s.Stop()
_, err = net.Dial(transport, socketPath)
- assert.NotNil(t, err)
+ assert.Error(t, err)
+}
+
+func defaultMUnixConn(addr net.Addr, streamMode bool) *mockUnixConn {
+ return &mockUnixConn{addr: addr, streamMode: streamMode, stop: make(chan struct{}, 5), timeout: time.Second * 2}
+}
+
+type mockUnixConn struct {
+ addr net.Addr
+ buffer [][]byte
+ offset int
+ stop chan struct{}
+ streamMode bool
+ timeout time.Duration
+}
+
+func (conn *mockUnixConn) Write(b []byte) (int, error) {
+ if conn.streamMode {
+ return conn.writeStream(b)
+ }
+ return conn.writeDatagram(b)
+}
+
+func (conn *mockUnixConn) writeDatagram(b []byte) (int, error) {
+ conn.buffer = append(conn.buffer, b)
+ return len(b), nil
+}
+
+func (conn *mockUnixConn) writeStream(b []byte) (int, error) {
+ if len(conn.buffer) == 0 {
+ conn.buffer = [][]byte{{}}
+ }
+ conn.buffer[0] = append(conn.buffer[0], b...)
+ return len(b), nil
+}
+
+func (conn *mockUnixConn) Close() error {
+ conn.stop <- struct{}{}
+ return nil
+}
+func (conn *mockUnixConn) LocalAddr() net.Addr { return conn.addr }
+func (conn *mockUnixConn) Read(b []byte) (int, error) {
+ if conn.streamMode {
+ return conn.readStream(b)
+ }
+ return conn.readDatagram(b)
+}
+
+func (conn *mockUnixConn) readDatagram(b []byte) (int, error) {
+ if conn.offset >= len(conn.buffer) {
+ select {
+ case <-conn.stop:
+ return 0, io.EOF
+ case <-time.After(conn.timeout):
+ return 0, errors.New("Test timed out without being closed")
+ }
+ }
+
+ n := copy(b, conn.buffer[conn.offset])
+ conn.offset++
+ return n, nil
+}
+
+func (conn *mockUnixConn) readStream(b []byte) (int, error) {
+ if conn.offset >= len(conn.buffer[0]) {
+ select {
+ case <-conn.stop:
+ return 0, io.EOF
+ case <-time.After(conn.timeout):
+ return 0, errors.New("Test timed out without being closed")
+ }
+ }
+ n := copy(b, conn.buffer[0][conn.offset:])
+ conn.offset += n
+ return n, nil
+}
+
+func (conn *mockUnixConn) ReadFromUnix(b []byte) (int, *net.UnixAddr, error) {
+ n, _ := conn.Read(b)
+ return n, nil, nil
+}
+func (conn *mockUnixConn) ReadMsgUnix(_ []byte, _ []byte) (n int, oobn int, flags int, addr *net.UnixAddr, err error) {
+ return 0, 0, 0, nil, nil
+}
+func (conn *mockUnixConn) SyscallConn() (syscall.RawConn, error) {
+ return nil, errors.New("Unimplemented")
+}
+func (conn *mockUnixConn) SetReadBuffer(_ int) error {
+ return errors.New("Unimplemented")
+}
+func (conn *mockUnixConn) RemoteAddr() net.Addr {
+ return conn.addr
+}
+func (conn *mockUnixConn) SetDeadline(_ time.Time) error {
+ return errors.New("Unimplemented")
+}
+func (conn *mockUnixConn) SetReadDeadline(_ time.Time) error {
+ return errors.New("Unimplemented")
+}
+func (conn *mockUnixConn) SetWriteDeadline(_ time.Time) error {
+ return errors.New("Unimplemented")
}
diff --git a/comp/dogstatsd/listeners/uds_datagram.go b/comp/dogstatsd/listeners/uds_datagram.go
index 654cbad487bed..be834e4ba2b09 100644
--- a/comp/dogstatsd/listeners/uds_datagram.go
+++ b/comp/dogstatsd/listeners/uds_datagram.go
@@ -77,7 +77,7 @@ func (l *UDSDatagramListener) Listen() {
func (l *UDSDatagramListener) listen() {
log.Infof("dogstatsd-uds: starting to listen on %s", l.conn.LocalAddr())
- _ = l.handleConnection(l.conn, func(conn *net.UnixConn) error {
+ _ = l.handleConnection(l.conn, func(conn netUnixConn) error {
return conn.Close()
})
}
diff --git a/comp/dogstatsd/listeners/uds_datagram_test.go b/comp/dogstatsd/listeners/uds_datagram_test.go
index 3406fcfdff0a2..4525bc5848abb 100644
--- a/comp/dogstatsd/listeners/uds_datagram_test.go
+++ b/comp/dogstatsd/listeners/uds_datagram_test.go
@@ -10,7 +10,6 @@
package listeners
import (
- "net"
"testing"
"time"
@@ -56,16 +55,14 @@ func TestUDSDatagramReceive(t *testing.T) {
assert.Nil(t, err)
assert.NotNil(t, s)
- s.Listen()
defer s.Stop()
- conn, err := net.Dial("unixgram", socketPath)
- assert.Nil(t, err)
- defer conn.Close()
- conn.Write([]byte{})
- conn.Write(contents0)
- conn.Write(contents1)
+ mConn := defaultMUnixConn(s.(*UDSDatagramListener).conn.LocalAddr(), false)
+ mConn.Write([]byte{})
+ mConn.Write(contents0)
+ mConn.Write(contents1)
+ go s.(*UDSDatagramListener).handleConnection(mConn, func(c netUnixConn) error { return c.Close() })
select {
case pkts := <-packetsChannel:
assert.Equal(t, 3, len(pkts))
@@ -132,5 +129,4 @@ func TestUDSDatagramReceive(t *testing.T) {
case <-time.After(2 * time.Second):
assert.FailNow(t, "Timeout on receive channel")
}
-
}
diff --git a/comp/dogstatsd/listeners/uds_linux.go b/comp/dogstatsd/listeners/uds_linux.go
index 9958790658c37..62a43fd225cb4 100644
--- a/comp/dogstatsd/listeners/uds_linux.go
+++ b/comp/dogstatsd/listeners/uds_linux.go
@@ -8,7 +8,6 @@ package listeners
import (
"errors"
"fmt"
- "net"
"strconv"
"time"
@@ -41,7 +40,7 @@ func getUDSAncillarySize() int {
// enableUDSPassCred enables credential passing from the kernel for origin detection.
// That flag can be ignored if origin dection is disabled.
-func enableUDSPassCred(conn *net.UnixConn) error {
+func enableUDSPassCred(conn netUnixConn) error {
rawconn, err := conn.SyscallConn()
if err != nil {
return err
diff --git a/comp/dogstatsd/listeners/uds_nolinux.go b/comp/dogstatsd/listeners/uds_nolinux.go
index 4143103f2dfc9..4ab0b62117646 100644
--- a/comp/dogstatsd/listeners/uds_nolinux.go
+++ b/comp/dogstatsd/listeners/uds_nolinux.go
@@ -9,7 +9,6 @@ package listeners
import (
"errors"
- "net"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/comp/dogstatsd/packets"
@@ -28,7 +27,7 @@ func getUDSAncillarySize() int {
// enableUDSPassCred returns a "not implemented" error on non-linux hosts
//
//nolint:revive // TODO(AML) Fix revive linter
-func enableUDSPassCred(_ *net.UnixConn) error {
+func enableUDSPassCred(_ netUnixConn) error {
return ErrLinuxOnly
}
diff --git a/comp/dogstatsd/listeners/uds_stream.go b/comp/dogstatsd/listeners/uds_stream.go
index 494f78a93fbc6..48a11e86cdccb 100644
--- a/comp/dogstatsd/listeners/uds_stream.go
+++ b/comp/dogstatsd/listeners/uds_stream.go
@@ -86,7 +86,7 @@ func (l *UDSStreamListener) listen() {
}
go func() {
l.connTracker.Track(conn)
- _ = l.handleConnection(conn, func(c *net.UnixConn) error {
+ _ = l.handleConnection(conn, func(c netUnixConn) error {
l.connTracker.Close(c)
return nil
})
diff --git a/comp/dogstatsd/listeners/uds_stream_test.go b/comp/dogstatsd/listeners/uds_stream_test.go
index 34d607ec11a4d..05359bd0d943e 100644
--- a/comp/dogstatsd/listeners/uds_stream_test.go
+++ b/comp/dogstatsd/listeners/uds_stream_test.go
@@ -11,7 +11,6 @@ package listeners
import (
"encoding/binary"
- "net"
"testing"
"time"
@@ -56,17 +55,16 @@ func TestUDSStreamReceive(t *testing.T) {
assert.Nil(t, err)
assert.NotNil(t, s)
- s.Listen()
+ mConn := defaultMUnixConn(s.(*UDSStreamListener).conn.Addr(), true)
defer s.Stop()
- conn, err := net.Dial("unix", socketPath)
- assert.Nil(t, err)
- defer conn.Close()
- binary.Write(conn, binary.LittleEndian, int32(len(contents0)))
- conn.Write(contents0)
+ binary.Write(mConn, binary.LittleEndian, int32(len(contents0)))
+ mConn.Write(contents0)
+
+ binary.Write(mConn, binary.LittleEndian, int32(len(contents1)))
+ mConn.Write(contents1)
- binary.Write(conn, binary.LittleEndian, int32(len(contents1)))
- conn.Write(contents1)
+ go s.(*UDSStreamListener).handleConnection(mConn, func(c netUnixConn) error { return c.Close() })
select {
case pkts := <-packetsChannel:
@@ -87,5 +85,4 @@ func TestUDSStreamReceive(t *testing.T) {
case <-time.After(2 * time.Second):
assert.FailNow(t, "Timeout on receive channel")
}
-
}
diff --git a/comp/dogstatsd/replay/impl/capture.go b/comp/dogstatsd/replay/impl/capture.go
index 29817627034f4..cac424c81f4ac 100644
--- a/comp/dogstatsd/replay/impl/capture.go
+++ b/comp/dogstatsd/replay/impl/capture.go
@@ -16,7 +16,7 @@ import (
"github.com/spf13/afero"
configComponent "github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
compdef "github.com/DataDog/datadog-agent/comp/def"
"github.com/DataDog/datadog-agent/comp/dogstatsd/packets"
replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def"
diff --git a/comp/dogstatsd/replay/impl/writer.go b/comp/dogstatsd/replay/impl/writer.go
index bfe3a03d1397f..f5a85f604165b 100644
--- a/comp/dogstatsd/replay/impl/writer.go
+++ b/comp/dogstatsd/replay/impl/writer.go
@@ -22,8 +22,8 @@ import (
"github.com/golang/protobuf/proto"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
"github.com/DataDog/datadog-agent/comp/core/tagger/common"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
taggerproto "github.com/DataDog/datadog-agent/comp/core/tagger/proto"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/dogstatsd/packets"
diff --git a/comp/dogstatsd/replay/impl/writer_test.go b/comp/dogstatsd/replay/impl/writer_test.go
index bdb3852d03bdb..a3e8222301c2c 100644
--- a/comp/dogstatsd/replay/impl/writer_test.go
+++ b/comp/dogstatsd/replay/impl/writer_test.go
@@ -20,8 +20,7 @@ import (
"go.uber.org/atomic"
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/core/telemetry"
telemetrynoop "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl"
"github.com/DataDog/datadog-agent/comp/dogstatsd/packets"
@@ -40,7 +39,7 @@ func writerTest(t *testing.T, z bool) {
cfg := config.NewMock(t)
- taggerComponent := fxutil.Test[tagger.Mock](t, taggerimpl.MockModule())
+ taggerComponent := mock.SetupFakeTagger(t)
writer := NewTrafficCaptureWriter(1, taggerComponent)
diff --git a/comp/dogstatsd/server/batch.go b/comp/dogstatsd/server/batch.go
index d340be63c4121..7ad59ff21e3b9 100644
--- a/comp/dogstatsd/server/batch.go
+++ b/comp/dogstatsd/server/batch.go
@@ -24,6 +24,15 @@ import (
"github.com/DataDog/datadog-agent/pkg/tagset"
)
+// interface requiring all functions expected by the dogstatsd server
+type dogstatsdBatcher interface {
+ appendSample(sample metrics.MetricSample)
+ appendEvent(event *event.Event)
+ appendServiceCheck(serviceCheck *servicecheck.ServiceCheck)
+ appendLateSample(sample metrics.MetricSample)
+ flush()
+}
+
// batcher batches multiple metrics before submission
// this struct is not safe for concurrent use
type batcher struct {
diff --git a/comp/dogstatsd/server/server.go b/comp/dogstatsd/server/server.go
index 059fdafdff191..52d53d93c17c1 100644
--- a/comp/dogstatsd/server/server.go
+++ b/comp/dogstatsd/server/server.go
@@ -572,9 +572,9 @@ func dropCR(data []byte) []byte {
return data
}
-// ScanLines is an almost identical reimplementation of bufio.ScanLines, but also
+// scanLines is an almost identical reimplementation of bufio.ScanLines, but also
// reports if the returned line is newline-terminated
-func ScanLines(data []byte, atEOF bool) (advance int, token []byte, eol bool, err error) {
+func scanLines(data []byte, atEOF bool) (advance int, token []byte, eol bool, err error) {
if atEOF && len(data) == 0 {
return 0, nil, false, nil
}
@@ -595,7 +595,7 @@ func nextMessage(packet *[]byte, eolTermination bool) (message []byte) {
return nil
}
- advance, message, eol, err := ScanLines(*packet, true)
+ advance, message, eol, err := scanLines(*packet, true)
if err != nil {
return nil
}
@@ -634,7 +634,7 @@ func (s *server) errLog(format string, params ...interface{}) {
}
// workers are running this function in their goroutine
-func (s *server) parsePackets(batcher *batcher, parser *parser, packets []*packets.Packet, samples metrics.MetricSampleBatch) metrics.MetricSampleBatch {
+func (s *server) parsePackets(batcher dogstatsdBatcher, parser *parser, packets []*packets.Packet, samples metrics.MetricSampleBatch) metrics.MetricSampleBatch {
for _, packet := range packets {
s.log.Tracef("Dogstatsd receive: %q", packet.Contents)
for {
diff --git a/comp/dogstatsd/server/server_integration_test.go b/comp/dogstatsd/server/server_integration_test.go
new file mode 100644
index 0000000000000..ab693b7462f13
--- /dev/null
+++ b/comp/dogstatsd/server/server_integration_test.go
@@ -0,0 +1,203 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+
+package server
+
+import (
+ "context"
+ "net"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/DataDog/datadog-agent/comp/dogstatsd/listeners"
+)
+
+func TestStopServer(t *testing.T) {
+ cfg := make(map[string]interface{})
+
+ cfg["dogstatsd_port"] = listeners.RandomPortName
+
+ _, s := fulfillDepsWithInactiveServer(t, cfg)
+ s.start(context.TODO())
+ requireStart(t, s)
+
+ s.stop(context.TODO())
+
+ // check that the port can be bound, try for 100 ms
+ address, err := net.ResolveUDPAddr("udp", s.UDPLocalAddr())
+ require.NoError(t, err, "cannot resolve address")
+
+ for i := 0; i < 10; i++ {
+ var conn net.Conn
+ conn, err = net.ListenUDP("udp", address)
+ if err == nil {
+ conn.Close()
+ break
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+ require.NoError(t, err, "port is not available, it should be")
+}
+
+func TestUDPConn(t *testing.T) {
+ cfg := make(map[string]interface{})
+
+ cfg["dogstatsd_port"] = listeners.RandomPortName
+ cfg["dogstatsd_no_aggregation_pipeline"] = true // another test may have turned it off
+
+ deps := fulfillDepsWithConfigOverride(t, cfg)
+ s := deps.Server.(*server)
+ requireStart(t, s)
+
+ conn, err := net.Dial("udp", s.UDPLocalAddr())
+ require.NoError(t, err, "cannot connect to DSD socket")
+ defer conn.Close()
+
+ runConnTest(t, conn, deps)
+
+ s.stop(context.TODO())
+
+ // check that the port can be bound, try for 100 ms
+ address, err := net.ResolveUDPAddr("udp", s.UDPLocalAddr())
+ require.NoError(t, err, "cannot resolve address")
+
+ for i := 0; i < 10; i++ {
+ var conn net.Conn
+ conn, err = net.ListenUDP("udp", address)
+ if err == nil {
+ conn.Close()
+ break
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+ require.NoError(t, err, "port is not available, it should be")
+}
+
+func TestUDPForward(t *testing.T) {
+ cfg := make(map[string]interface{})
+
+ pc, err := net.ListenPacket("udp", "127.0.0.1:0")
+ require.NoError(t, err)
+
+ pcHost, pcPort, err := net.SplitHostPort(pc.LocalAddr().String())
+ require.NoError(t, err)
+
+ // Setup UDP server to forward to
+ cfg["statsd_forward_port"] = pcPort
+ cfg["statsd_forward_host"] = pcHost
+
+ // Setup dogstatsd server
+ cfg["dogstatsd_port"] = listeners.RandomPortName
+
+ deps := fulfillDepsWithConfigOverride(t, cfg)
+
+ defer pc.Close()
+
+ requireStart(t, deps.Server)
+
+ conn, err := net.Dial("udp", deps.Server.UDPLocalAddr())
+ require.NoError(t, err)
+ require.NotNil(t, conn)
+ defer conn.Close()
+
+ // Check if message is forwarded
+ message := []byte("daemon:666|g|#sometag1:somevalue1,sometag2:somevalue2")
+
+ _, err = conn.Write(message)
+ require.NoError(t, err, "cannot write to DSD socket")
+
+ _ = pc.SetReadDeadline(time.Now().Add(4 * time.Second))
+
+ buffer := make([]byte, len(message))
+ _, _, err = pc.ReadFrom(buffer)
+ require.NoError(t, err)
+
+ assert.Equal(t, message, buffer)
+}
+
+func TestUDSConn(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("UDS isn't supported on windows")
+ }
+ socketPath := filepath.Join(t.TempDir(), "dsd.socket")
+
+ cfg := make(map[string]interface{})
+ cfg["dogstatsd_port"] = listeners.RandomPortName
+ cfg["dogstatsd_no_aggregation_pipeline"] = true // another test may have turned it off
+ cfg["dogstatsd_socket"] = socketPath
+
+ deps := fulfillDepsWithConfigOverride(t, cfg)
+ require.True(t, deps.Server.UdsListenerRunning())
+
+ conn, err := net.Dial("unixgram", socketPath)
+ require.NoError(t, err, "cannot connect to DSD socket")
+ defer conn.Close()
+
+ runConnTest(t, conn, deps)
+
+ s := deps.Server.(*server)
+ s.Stop()
+ _, err = net.Dial("unixgram", socketPath)
+ require.Error(t, err, "UDS listener should be closed")
+}
+
+func runConnTest(t *testing.T, conn net.Conn, deps serverDeps) {
+ demux := deps.Demultiplexer
+ eventOut, serviceOut := demux.GetEventsAndServiceChecksChannels()
+
+ // Test metric
+ conn.Write(defaultMetricInput)
+ samples, timedSamples := demux.WaitForSamples(time.Second * 2)
+
+ assert.Equal(t, 1, len(samples), "expected one metric entries after 2 seconds")
+ assert.Equal(t, 0, len(timedSamples), "did not expect any timed metrics")
+
+ defaultMetric().testMetric(t, samples[0])
+
+	// Test service checks
+ conn.Write(defaultServiceInput)
+ select {
+ case servL := <-serviceOut:
+ assert.Equal(t, 1, len(servL))
+ defaultServiceCheck().testService(t, servL[0])
+ case <-time.After(2 * time.Second):
+ assert.FailNow(t, "Timeout on service channel")
+ }
+
+ // Test event
+ conn.Write(defaultEventInput)
+ select {
+ case eventL := <-eventOut:
+ assert.Equal(t, 1, len(eventL))
+ defaultEvent().testEvent(t, eventL[0])
+ case <-time.After(2 * time.Second):
+ assert.FailNow(t, "Timeout on event channel")
+ }
+}
+
+func TestUDSReceiverNoDir(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("UDS isn't supported on windows")
+ }
+ socketPath := filepath.Join(t.TempDir(), "nonexistent", "dsd.socket") // nonexistent dir, listener should not be set
+
+ cfg := make(map[string]interface{})
+ cfg["dogstatsd_port"] = listeners.RandomPortName
+ cfg["dogstatsd_no_aggregation_pipeline"] = true // another test may have turned it off
+ cfg["dogstatsd_socket"] = socketPath
+
+ deps := fulfillDepsWithConfigOverride(t, cfg)
+ require.False(t, deps.Server.UdsListenerRunning())
+
+ _, err := net.Dial("unixgram", socketPath)
+ require.Error(t, err, "UDS listener should be closed")
+}
diff --git a/comp/dogstatsd/server/server_test.go b/comp/dogstatsd/server/server_test.go
index 7588e7a5d105e..fdb92e02a42b1 100644
--- a/comp/dogstatsd/server/server_test.go
+++ b/comp/dogstatsd/server/server_test.go
@@ -8,118 +8,21 @@
package server
import (
- "context"
"fmt"
- "net"
"runtime"
- "sort"
- "strings"
"testing"
- "time"
-
- "github.com/DataDog/datadog-agent/pkg/util/testutil/flake"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "go.uber.org/fx"
- "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer"
- "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl"
- "github.com/DataDog/datadog-agent/comp/core"
- configComponent "github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl"
- log "github.com/DataDog/datadog-agent/comp/core/log/def"
- logmock "github.com/DataDog/datadog-agent/comp/core/log/mock"
- "github.com/DataDog/datadog-agent/comp/core/telemetry"
- "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
- workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
- workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
"github.com/DataDog/datadog-agent/comp/dogstatsd/listeners"
"github.com/DataDog/datadog-agent/comp/dogstatsd/mapper"
- "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap"
- "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap/pidmapimpl"
- replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def"
- replaymock "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/fx-mock"
- serverdebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug"
- "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug/serverdebugimpl"
- "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl"
"github.com/DataDog/datadog-agent/pkg/config/env"
configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
"github.com/DataDog/datadog-agent/pkg/config/model"
"github.com/DataDog/datadog-agent/pkg/metrics"
- "github.com/DataDog/datadog-agent/pkg/util/fxutil"
- "github.com/DataDog/datadog-agent/pkg/util/optional"
)
-// This is a copy of the serverDeps struct, but without the server field.
-// We need this to avoid starting multiple server with the same test.
-type depsWithoutServer struct {
- fx.In
-
- Config configComponent.Component
- Log log.Component
- Demultiplexer demultiplexer.FakeSamplerMock
- Replay replay.Component
- PidMap pidmap.Component
- Debug serverdebug.Component
- WMeta optional.Option[workloadmeta.Component]
- Telemetry telemetry.Component
-}
-
-type serverDeps struct {
- fx.In
-
- Config configComponent.Component
- Log log.Component
- Demultiplexer demultiplexer.FakeSamplerMock
- Replay replay.Component
- PidMap pidmap.Component
- Debug serverdebug.Component
- WMeta optional.Option[workloadmeta.Component]
- Telemetry telemetry.Component
- Server Component
-}
-
-func fulfillDeps(t testing.TB) serverDeps {
- return fulfillDepsWithConfigOverride(t, map[string]interface{}{})
-}
-
-func fulfillDepsWithConfigOverride(t testing.TB, overrides map[string]interface{}) serverDeps {
- // TODO: https://datadoghq.atlassian.net/browse/AMLII-1948
- if runtime.GOOS == "darwin" {
- flake.Mark(t)
- }
- return fxutil.Test[serverDeps](t, fx.Options(
- core.MockBundle(),
- serverdebugimpl.MockModule(),
- fx.Replace(configComponent.MockParams{
- Overrides: overrides,
- }),
- replaymock.MockModule(),
- compressionimpl.MockModule(),
- pidmapimpl.Module(),
- demultiplexerimpl.FakeSamplerMockModule(),
- workloadmetafxmock.MockModule(workloadmeta.NewParams()),
- Module(Params{Serverless: false}),
- ))
-}
-
-func fulfillDepsWithConfigYaml(t testing.TB, yaml string) serverDeps {
- return fxutil.Test[serverDeps](t, fx.Options(
- fx.Provide(func(t testing.TB) log.Component { return logmock.New(t) }),
- fx.Provide(func(t testing.TB) configComponent.Component { return configComponent.NewMockFromYAML(t, yaml) }),
- telemetryimpl.MockModule(),
- hostnameimpl.MockModule(),
- serverdebugimpl.MockModule(),
- replaymock.MockModule(),
- compressionimpl.MockModule(),
- pidmapimpl.Module(),
- demultiplexerimpl.FakeSamplerMockModule(),
- workloadmetafxmock.MockModule(workloadmeta.NewParams()),
- Module(Params{Serverless: false}),
- ))
-}
-
func TestNewServer(t *testing.T) {
cfg := make(map[string]interface{})
cfg["dogstatsd_port"] = listeners.RandomPortName
@@ -129,45 +32,17 @@ func TestNewServer(t *testing.T) {
}
-func TestStopServer(t *testing.T) {
+func TestUDSReceiverDisabled(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("UDS isn't supported on windows")
+ }
cfg := make(map[string]interface{})
-
cfg["dogstatsd_port"] = listeners.RandomPortName
+ cfg["dogstatsd_no_aggregation_pipeline"] = true // another test may have turned it off
+ cfg["dogstatsd_socket"] = "" // disabled
- deps := fxutil.Test[depsWithoutServer](t, fx.Options(
- core.MockBundle(),
- serverdebugimpl.MockModule(),
- fx.Replace(configComponent.MockParams{
- Overrides: cfg,
- }),
- fx.Supply(Params{Serverless: false}),
- replaymock.MockModule(),
- compressionimpl.MockModule(),
- pidmapimpl.Module(),
- demultiplexerimpl.FakeSamplerMockModule(),
- workloadmetafxmock.MockModule(workloadmeta.NewParams()),
- ))
-
- s := newServerCompat(deps.Config, deps.Log, deps.Replay, deps.Debug, false, deps.Demultiplexer, deps.WMeta, deps.PidMap, deps.Telemetry)
- s.start(context.TODO())
- requireStart(t, s)
-
- s.stop(context.TODO())
-
- // check that the port can be bound, try for 100 ms
- address, err := net.ResolveUDPAddr("udp", s.UDPLocalAddr())
- require.NoError(t, err, "cannot resolve address")
-
- for i := 0; i < 10; i++ {
- var conn net.Conn
- conn, err = net.ListenUDP("udp", address)
- if err == nil {
- conn.Close()
- break
- }
- time.Sleep(10 * time.Millisecond)
- }
- require.NoError(t, err, "port is not available, it should be")
+ deps := fulfillDepsWithConfigOverride(t, cfg)
+ require.False(t, deps.Server.UdsListenerRunning())
}
// This test is proving that no data race occurred on the `cachedTlmOriginIds` map.
@@ -181,21 +56,7 @@ func TestNoRaceOriginTagMaps(t *testing.T) {
cfg["dogstatsd_port"] = listeners.RandomPortName
- deps := fxutil.Test[depsWithoutServer](t, fx.Options(
- core.MockBundle(),
- serverdebugimpl.MockModule(),
- fx.Replace(configComponent.MockParams{
- Overrides: cfg,
- }),
- fx.Supply(Params{Serverless: false}),
- replaymock.MockModule(),
- compressionimpl.MockModule(),
- pidmapimpl.Module(),
- demultiplexerimpl.FakeSamplerMockModule(),
- workloadmetafxmock.MockModule(workloadmeta.NewParams()),
- ))
-
- s := newServerCompat(deps.Config, deps.Log, deps.Replay, deps.Debug, false, deps.Demultiplexer, deps.WMeta, deps.PidMap, deps.Telemetry)
+ _, s := fulfillDepsWithInactiveServer(t, cfg)
sync := make(chan struct{})
done := make(chan struct{}, N)
@@ -213,530 +74,6 @@ func TestNoRaceOriginTagMaps(t *testing.T) {
}
}
-func testReceive(t *testing.T, conn net.Conn, demux demultiplexer.FakeSamplerMock) {
- // Test metric
- _, err := conn.Write([]byte("daemon:666|g|#sometag1:somevalue1,sometag2:somevalue2"))
- require.NoError(t, err, "cannot write to DSD socket")
-
- samples, timedSamples := demux.WaitForSamples(time.Second * 2)
- require.Len(t, samples, 1)
- require.Len(t, timedSamples, 0)
- sample := samples[0]
- assert.NotNil(t, sample)
- assert.Equal(t, sample.Name, "daemon")
- assert.EqualValues(t, sample.Value, 666.0)
- assert.Equal(t, sample.Mtype, metrics.GaugeType)
- assert.ElementsMatch(t, sample.Tags, []string{"sometag1:somevalue1", "sometag2:somevalue2"})
- demux.Reset()
-
- _, err = conn.Write([]byte("daemon:666|c|@0.5|#sometag1:somevalue1,sometag2:somevalue2"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForSamples(time.Second * 2)
- require.Len(t, samples, 1)
- require.Len(t, timedSamples, 0)
- sample = samples[0]
- assert.NotNil(t, sample)
- assert.Equal(t, sample.Name, "daemon")
- assert.EqualValues(t, sample.Value, 666.0)
- assert.Equal(t, metrics.CounterType, sample.Mtype)
- assert.Equal(t, 0.5, sample.SampleRate)
- demux.Reset()
-
- _, err = conn.Write([]byte("daemon:666|h|@0.5|#sometag1:somevalue1,sometag2:somevalue2"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForSamples(time.Second * 2)
- require.Len(t, samples, 1)
- require.Len(t, timedSamples, 0)
- sample = samples[0]
- assert.NotNil(t, sample)
- assert.Equal(t, sample.Name, "daemon")
- assert.EqualValues(t, sample.Value, 666.0)
- assert.Equal(t, metrics.HistogramType, sample.Mtype)
- assert.Equal(t, 0.5, sample.SampleRate)
- demux.Reset()
-
- _, err = conn.Write([]byte("daemon:666|ms|@0.5|#sometag1:somevalue1,sometag2:somevalue2"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForSamples(time.Second * 2)
- require.Len(t, samples, 1)
- require.Len(t, timedSamples, 0)
- sample = samples[0]
- assert.NotNil(t, sample)
- assert.Equal(t, sample.Name, "daemon")
- assert.EqualValues(t, sample.Value, 666.0)
- assert.Equal(t, metrics.HistogramType, sample.Mtype)
- assert.Equal(t, 0.5, sample.SampleRate)
- demux.Reset()
-
- _, err = conn.Write([]byte("daemon_set:abc|s|#sometag1:somevalue1,sometag2:somevalue2"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForSamples(time.Second * 2)
- require.Len(t, samples, 1)
- require.Len(t, timedSamples, 0)
- sample = samples[0]
- assert.NotNil(t, sample)
- assert.Equal(t, sample.Name, "daemon_set")
- assert.Equal(t, sample.RawValue, "abc")
- assert.Equal(t, sample.Mtype, metrics.SetType)
- demux.Reset()
-
- // multi-metric packet
- _, err = conn.Write([]byte("daemon1:666|c\ndaemon2:1000|c"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForNumberOfSamples(2, 0, time.Second*2)
- require.Len(t, samples, 2)
- require.Len(t, timedSamples, 0)
- sample1 := samples[0]
- assert.NotNil(t, sample1)
- assert.Equal(t, sample1.Name, "daemon1")
- assert.EqualValues(t, sample1.Value, 666.0)
- assert.Equal(t, sample1.Mtype, metrics.CounterType)
- sample2 := samples[1]
- assert.NotNil(t, sample2)
- assert.Equal(t, sample2.Name, "daemon2")
- assert.EqualValues(t, sample2.Value, 1000.0)
- assert.Equal(t, sample2.Mtype, metrics.CounterType)
- demux.Reset()
-
- // multi-value packet
- _, err = conn.Write([]byte("daemon1:666:123|c\ndaemon2:1000|c"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForNumberOfSamples(3, 0, time.Second*2)
- require.Len(t, samples, 3)
- require.Len(t, timedSamples, 0)
- sample1 = samples[0]
- assert.NotNil(t, sample1)
- assert.Equal(t, sample1.Name, "daemon1")
- assert.EqualValues(t, sample1.Value, 666.0)
- assert.Equal(t, sample1.Mtype, metrics.CounterType)
- sample2 = samples[1]
- assert.NotNil(t, sample2)
- assert.Equal(t, sample2.Name, "daemon1")
- assert.EqualValues(t, sample2.Value, 123.0)
- assert.Equal(t, sample2.Mtype, metrics.CounterType)
- sample3 := samples[2]
- assert.NotNil(t, sample3)
- assert.Equal(t, sample3.Name, "daemon2")
- assert.EqualValues(t, sample3.Value, 1000.0)
- assert.Equal(t, sample3.Mtype, metrics.CounterType)
- demux.Reset()
-
- // multi-value packet with skip empty
- _, err = conn.Write([]byte("daemon1::666::123::::|c\ndaemon2:1000|c"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForNumberOfSamples(3, 0, time.Second*2)
- require.Len(t, samples, 3)
- require.Len(t, timedSamples, 0)
- sample1 = samples[0]
- assert.NotNil(t, sample1)
- assert.Equal(t, sample1.Name, "daemon1")
- assert.EqualValues(t, sample1.Value, 666.0)
- assert.Equal(t, sample1.Mtype, metrics.CounterType)
- sample2 = samples[1]
- assert.NotNil(t, sample2)
- assert.Equal(t, sample2.Name, "daemon1")
- assert.EqualValues(t, sample2.Value, 123.0)
- assert.Equal(t, sample2.Mtype, metrics.CounterType)
- sample3 = samples[2]
- assert.NotNil(t, sample3)
- assert.Equal(t, sample3.Name, "daemon2")
- assert.EqualValues(t, sample3.Value, 1000.0)
- assert.Equal(t, sample3.Mtype, metrics.CounterType)
- demux.Reset()
-
- // // slightly malformed multi-metric packet, should still be parsed in whole
- _, err = conn.Write([]byte("daemon1:666|c\n\ndaemon2:1000|c\n"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForNumberOfSamples(2, 0, time.Second*2)
- require.Len(t, samples, 2)
- require.Len(t, timedSamples, 0)
- sample1 = samples[0]
- assert.NotNil(t, sample1)
- assert.Equal(t, sample1.Name, "daemon1")
- assert.EqualValues(t, sample1.Value, 666.0)
- assert.Equal(t, sample1.Mtype, metrics.CounterType)
- sample2 = samples[1]
- assert.NotNil(t, sample2)
- assert.Equal(t, sample2.Name, "daemon2")
- assert.EqualValues(t, sample2.Value, 1000.0)
- assert.Equal(t, sample2.Mtype, metrics.CounterType)
- demux.Reset()
-
- // Test erroneous metric
- _, err = conn.Write([]byte("daemon1:666a|g\ndaemon2:666|g|#sometag1:somevalue1,sometag2:somevalue2"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForSamples(time.Second * 2)
- require.Len(t, samples, 1)
- require.Len(t, timedSamples, 0)
- sample = samples[0]
- assert.NotNil(t, sample)
- assert.Equal(t, sample.Name, "daemon2")
- demux.Reset()
-
- // Test empty metric
- _, err = conn.Write([]byte("daemon1:|g\ndaemon2:666|g|#sometag1:somevalue1,sometag2:somevalue2\ndaemon3: :1:|g"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForSamples(time.Second * 2)
- require.Len(t, samples, 1)
- require.Len(t, timedSamples, 0)
- sample = samples[0]
- assert.NotNil(t, sample)
- assert.Equal(t, sample.Name, "daemon2")
- demux.Reset()
-
- // Late gauge
- _, err = conn.Write([]byte("daemon:666|g|#sometag1:somevalue1,sometag2:somevalue2|T1658328888"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForSamples(time.Second * 2)
- require.Len(t, samples, 0)
- require.Len(t, timedSamples, 1)
- sample = timedSamples[0]
- require.NotNil(t, sample)
- assert.Equal(t, sample.Mtype, metrics.GaugeType)
- assert.Equal(t, sample.Name, "daemon")
- assert.Equal(t, sample.Timestamp, float64(1658328888))
- demux.Reset()
-
- // Late count
- _, err = conn.Write([]byte("daemon:666|c|#sometag1:somevalue1,sometag2:somevalue2|T1658328888"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForSamples(time.Second * 2)
- require.Len(t, samples, 0)
- require.Len(t, timedSamples, 1)
- sample = timedSamples[0]
- require.NotNil(t, sample)
- assert.Equal(t, sample.Mtype, metrics.CounterType)
- assert.Equal(t, sample.Name, "daemon")
- assert.Equal(t, sample.Timestamp, float64(1658328888))
- demux.Reset()
-
- // Late metric and a normal one
- _, err = conn.Write([]byte("daemon:666|g|#sometag1:somevalue1,sometag2:somevalue2|T1658328888\ndaemon2:666|c"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForNumberOfSamples(1, 1, time.Second*2)
- require.Len(t, samples, 1)
- require.Len(t, timedSamples, 1)
- sample = timedSamples[0]
- require.NotNil(t, sample)
- assert.Equal(t, sample.Name, "daemon")
- assert.Equal(t, sample.Mtype, metrics.GaugeType)
- assert.Equal(t, sample.Timestamp, float64(1658328888))
- sample = samples[0]
- require.NotNil(t, sample)
- assert.Equal(t, sample.Name, "daemon2")
- demux.Reset()
-
- // Test Service Check
- // ------------------
-
- eventOut, serviceOut := demux.GetEventsAndServiceChecksChannels()
-
- _, err = conn.Write([]byte("_sc|agent.up|0|d:12345|h:localhost|m:this is fine|#sometag1:somevalyyue1,sometag2:somevalue2"))
- require.NoError(t, err, "cannot write to DSD socket")
- select {
- case res := <-serviceOut:
- assert.NotNil(t, res)
- case <-time.After(2 * time.Second):
- assert.FailNow(t, "Timeout on receive channel")
- }
-
- // Test erroneous Service Check
- _, err = conn.Write([]byte("_sc|agen.down\n_sc|agent.up|0|d:12345|h:localhost|m:this is fine|#sometag1:somevalyyue1,sometag2:somevalue2"))
- require.NoError(t, err, "cannot write to DSD socket")
- select {
- case res := <-serviceOut:
- assert.Equal(t, 1, len(res))
- serviceCheck := res[0]
- assert.NotNil(t, serviceCheck)
- assert.Equal(t, serviceCheck.CheckName, "agent.up")
- case <-time.After(2 * time.Second):
- assert.FailNow(t, "Timeout on receive channel")
- }
-
- // Test Event
- // ----------
-
- _, err = conn.Write([]byte("_e{10,10}:test title|test\\ntext|t:warning|d:12345|p:low|h:some.host|k:aggKey|s:source test|#tag1,tag2:test"))
- require.NoError(t, err, "cannot write to DSD socket")
- select {
- case res := <-eventOut:
- event := res[0]
- assert.NotNil(t, event)
- assert.ElementsMatch(t, event.Tags, []string{"tag1", "tag2:test"})
- case <-time.After(2 * time.Second):
- assert.FailNow(t, "Timeout on receive channel")
- }
-
- // Test erroneous Events
- _, err = conn.Write(
- []byte("_e{0,9}:|test text\n" +
- "_e{-5,2}:abc\n" +
- "_e{11,10}:test title2|test\\ntext|" +
- "t:warning|d:12345|p:low|h:some.host|k:aggKey|s:source test|#tag1,tag2:test",
- ),
- )
- require.NoError(t, err, "cannot write to DSD socket")
- select {
- case res := <-eventOut:
- assert.Equal(t, 1, len(res))
- event := res[0]
- assert.NotNil(t, event)
- assert.Equal(t, event.Title, "test title2")
- case <-time.After(2 * time.Second):
- assert.FailNow(t, "Timeout on receive channel")
- }
-}
-
-func TestUDPReceive(t *testing.T) {
- cfg := make(map[string]interface{})
-
- cfg["dogstatsd_port"] = listeners.RandomPortName
- cfg["dogstatsd_no_aggregation_pipeline"] = true // another test may have turned it off
-
- deps := fulfillDepsWithConfigOverride(t, cfg)
- demux := deps.Demultiplexer
-
- conn, err := net.Dial("udp", deps.Server.UDPLocalAddr())
- require.NoError(t, err, "cannot connect to UDP network")
- defer conn.Close()
-
- testReceive(t, conn, demux)
-}
-
-func TestUDPForward(t *testing.T) {
- cfg := make(map[string]interface{})
-
- pc, err := net.ListenPacket("udp", "127.0.0.1:0")
- require.NoError(t, err)
-
- pcHost, pcPort, err := net.SplitHostPort(pc.LocalAddr().String())
- require.NoError(t, err)
-
- // Setup UDP server to forward to
- cfg["statsd_forward_port"] = pcPort
- cfg["statsd_forward_host"] = pcHost
-
- // Setup dogstatsd server
- cfg["dogstatsd_port"] = listeners.RandomPortName
-
- deps := fulfillDepsWithConfigOverride(t, cfg)
-
- defer pc.Close()
-
- requireStart(t, deps.Server)
-
- conn, err := net.Dial("udp", deps.Server.UDPLocalAddr())
- require.NoError(t, err)
- require.NotNil(t, conn)
- defer conn.Close()
-
- // Check if message is forwarded
- message := []byte("daemon:666|g|#sometag1:somevalue1,sometag2:somevalue2")
-
- _, err = conn.Write(message)
- require.NoError(t, err, "cannot write to DSD socket")
-
- _ = pc.SetReadDeadline(time.Now().Add(4 * time.Second))
-
- buffer := make([]byte, len(message))
- _, _, err = pc.ReadFrom(buffer)
- require.NoError(t, err)
-
- assert.Equal(t, message, buffer)
-}
-
-func TestHistToDist(t *testing.T) {
- cfg := make(map[string]interface{})
-
- cfg["dogstatsd_port"] = listeners.RandomPortName
- cfg["histogram_copy_to_distribution"] = true
- cfg["histogram_copy_to_distribution_prefix"] = "dist."
-
- deps := fulfillDepsWithConfigOverride(t, cfg)
-
- demux := deps.Demultiplexer
- requireStart(t, deps.Server)
-
- conn, err := net.Dial("udp", deps.Server.UDPLocalAddr())
- require.NoError(t, err, "cannot connect to DSD socket")
- defer conn.Close()
-
- // Test metric
- _, err = conn.Write([]byte("daemon:666|h|#sometag1:somevalue1,sometag2:somevalue2"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples := demux.WaitForSamples(time.Second * 2)
- require.Equal(t, 2, len(samples))
- require.Equal(t, 0, len(timedSamples))
- histMetric := samples[0]
- distMetric := samples[1]
- assert.NotNil(t, histMetric)
- assert.Equal(t, histMetric.Name, "daemon")
- assert.EqualValues(t, histMetric.Value, 666.0)
- assert.Equal(t, metrics.HistogramType, histMetric.Mtype)
-
- assert.NotNil(t, distMetric)
- assert.Equal(t, distMetric.Name, "dist.daemon")
- assert.EqualValues(t, distMetric.Value, 666.0)
- assert.Equal(t, metrics.DistributionType, distMetric.Mtype)
- demux.Reset()
-}
-
-func TestScanLines(t *testing.T) {
- messages := []string{"foo", "bar", "baz", "quz", "hax", ""}
- packet := []byte(strings.Join(messages, "\n"))
- cnt := 0
- advance, tok, eol, err := ScanLines(packet, true)
- for tok != nil && err == nil {
- cnt++
- assert.Equal(t, eol, true)
- packet = packet[advance:]
- advance, tok, eol, err = ScanLines(packet, true)
- }
-
- assert.False(t, eol)
- assert.Equal(t, 5, cnt)
-
- cnt = 0
- packet = []byte(strings.Join(messages[0:len(messages)-1], "\n"))
- advance, tok, eol, err = ScanLines(packet, true)
- for tok != nil && err == nil {
- cnt++
- packet = packet[advance:]
- advance, tok, eol, err = ScanLines(packet, true)
- }
-
- assert.False(t, eol)
- assert.Equal(t, 5, cnt)
-}
-
-func TestEOLParsing(t *testing.T) {
- messages := []string{"foo", "bar", "baz", "quz", "hax", ""}
- packet := []byte(strings.Join(messages, "\n"))
- cnt := 0
- msg := nextMessage(&packet, true)
- for msg != nil {
- assert.Equal(t, string(msg), messages[cnt])
- msg = nextMessage(&packet, true)
- cnt++
- }
-
- assert.Equal(t, 5, cnt)
-
- packet = []byte(strings.Join(messages[0:len(messages)-1], "\r\n"))
- cnt = 0
- msg = nextMessage(&packet, true)
- for msg != nil {
- msg = nextMessage(&packet, true)
- cnt++
- }
-
- assert.Equal(t, 4, cnt)
-}
-
-func TestE2EParsing(t *testing.T) {
- cfg := make(map[string]interface{})
-
- cfg["dogstatsd_port"] = listeners.RandomPortName
-
- deps := fulfillDepsWithConfigOverride(t, cfg)
- demux := deps.Demultiplexer
- requireStart(t, deps.Server)
-
- conn, err := net.Dial("udp", deps.Server.UDPLocalAddr())
- require.NoError(t, err, "cannot connect to DSD socket")
- defer conn.Close()
-
- // Test metric
- conn.Write([]byte("daemon:666|g|#foo:bar\ndaemon:666|g|#foo:bar"))
- samples, timedSamples := demux.WaitForSamples(time.Second * 2)
- assert.Equal(t, 2, len(samples))
- assert.Equal(t, 0, len(timedSamples))
- demux.Reset()
- demux.Stop(false)
-
- // EOL enabled
- cfg["dogstatsd_eol_required"] = []string{"udp"}
-
- deps = fulfillDepsWithConfigOverride(t, cfg)
- demux = deps.Demultiplexer
- requireStart(t, deps.Server)
-
- conn, err = net.Dial("udp", deps.Server.UDPLocalAddr())
- require.NoError(t, err, "cannot connect to DSD socket")
- defer conn.Close()
-
- // Test metric expecting an EOL
- _, err = conn.Write([]byte("daemon:666|g|#foo:bar\ndaemon:666|g|#foo:bar"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples = demux.WaitForSamples(time.Second * 2)
- require.Equal(t, 1, len(samples))
- assert.Equal(t, 0, len(timedSamples))
- demux.Reset()
-}
-
-func TestExtraTags(t *testing.T) {
- cfg := make(map[string]interface{})
- cfg["dogstatsd_port"] = listeners.RandomPortName
- cfg["dogstatsd_tags"] = []string{"sometag3:somevalue3"}
-
- env.SetFeatures(t, env.EKSFargate)
- deps := fulfillDepsWithConfigOverride(t, cfg)
-
- demux := deps.Demultiplexer
- requireStart(t, deps.Server)
-
- conn, err := net.Dial("udp", deps.Server.UDPLocalAddr())
- require.NoError(t, err, "cannot connect to DSD socket")
- defer conn.Close()
-
- // Test metric
- _, err = conn.Write([]byte("daemon:666|g|#sometag1:somevalue1,sometag2:somevalue2"))
- require.NoError(t, err, "cannot write to DSD socket")
- samples, timedSamples := demux.WaitForSamples(time.Second * 2)
- require.Equal(t, 1, len(samples))
- require.Equal(t, 0, len(timedSamples))
- sample := samples[0]
- assert.NotNil(t, sample)
- assert.Equal(t, sample.Name, "daemon")
- assert.EqualValues(t, sample.Value, 666.0)
- assert.Equal(t, sample.Mtype, metrics.GaugeType)
- assert.ElementsMatch(t, sample.Tags, []string{"sometag1:somevalue1", "sometag2:somevalue2", "sometag3:somevalue3"})
-}
-
-func TestStaticTags(t *testing.T) {
- cfg := make(map[string]interface{})
- cfg["dogstatsd_port"] = listeners.RandomPortName
- cfg["dogstatsd_tags"] = []string{"sometag3:somevalue3"}
- cfg["tags"] = []string{"from:dd_tags"}
-
- env.SetFeatures(t, env.EKSFargate)
- deps := fulfillDepsWithConfigOverride(t, cfg)
-
- demux := deps.Demultiplexer
- requireStart(t, deps.Server)
-
- conn, err := net.Dial("udp", deps.Server.UDPLocalAddr())
- require.NoError(t, err, "cannot connect to DSD socket")
- defer conn.Close()
-
- // Test metric
- conn.Write([]byte("daemon:666|g|#sometag1:somevalue1,sometag2:somevalue2"))
- samples, timedSamples := demux.WaitForSamples(time.Second * 2)
- require.Equal(t, 1, len(samples))
- require.Equal(t, 0, len(timedSamples))
- sample := samples[0]
- assert.NotNil(t, sample)
- assert.Equal(t, sample.Name, "daemon")
- assert.EqualValues(t, sample.Value, 666.0)
- assert.Equal(t, sample.Mtype, metrics.GaugeType)
- assert.ElementsMatch(t, sample.Tags, []string{
- "sometag1:somevalue1",
- "sometag2:somevalue2",
- "sometag3:somevalue3",
- "from:dd_tags",
- })
-}
-
func TestNoMappingsConfig(t *testing.T) {
cfg := make(map[string]interface{})
cfg["dogstatsd_port"] = listeners.RandomPortName
@@ -757,270 +94,6 @@ func TestNoMappingsConfig(t *testing.T) {
assert.Len(t, samples, 1)
}
-func TestParseMetricMessageTelemetry(t *testing.T) {
- cfg := make(map[string]interface{})
-
- cfg["dogstatsd_port"] = listeners.RandomPortName
-
- deps := fxutil.Test[depsWithoutServer](t, fx.Options(
- core.MockBundle(),
- serverdebugimpl.MockModule(),
- fx.Replace(configComponent.MockParams{
- Overrides: cfg,
- }),
- fx.Supply(Params{Serverless: false}),
- replaymock.MockModule(),
- compressionimpl.MockModule(),
- pidmapimpl.Module(),
- demultiplexerimpl.FakeSamplerMockModule(),
- workloadmetafxmock.MockModule(workloadmeta.NewParams()),
- ))
-
- s := newServerCompat(deps.Config, deps.Log, deps.Replay, deps.Debug, false, deps.Demultiplexer, deps.WMeta, deps.PidMap, deps.Telemetry)
-
- assert.Nil(t, s.mapper)
-
- var samples []metrics.MetricSample
-
- parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)
-
- assert.Equal(t, float64(0), s.tlmProcessedOk.Get())
- samples, err := s.parseMetricMessage(samples, parser, []byte("test.metric:666|g"), "", "", false)
- assert.NoError(t, err)
- assert.Len(t, samples, 1)
- assert.Equal(t, float64(1), s.tlmProcessedOk.Get())
-
- assert.Equal(t, float64(0), s.tlmProcessedError.Get())
- samples, err = s.parseMetricMessage(samples, parser, nil, "", "", false)
- assert.Error(t, err, "invalid dogstatsd message format")
- assert.Len(t, samples, 1)
- assert.Equal(t, float64(1), s.tlmProcessedError.Get())
-}
-
-type MetricSample struct {
- Name string
- Value float64
- Tags []string
- Mtype metrics.MetricType
-}
-
-func TestMappingCases(t *testing.T) {
- scenarios := []struct {
- name string
- config string
- packets []string
- expectedSamples []MetricSample
- expectedCacheSize int
- }{
- {
- name: "Simple OK case",
- config: `
-dogstatsd_port: __random__
-dogstatsd_mapper_profiles:
- - name: test
- prefix: 'test.'
- mappings:
- - match: "test.job.duration.*.*"
- name: "test.job.duration"
- tags:
- job_type: "$1"
- job_name: "$2"
- - match: "test.job.size.*.*"
- name: "test.job.size"
- tags:
- foo: "$1"
- bar: "$2"
-`,
- packets: []string{
- "test.job.duration.my_job_type.my_job_name:666|g",
- "test.job.size.my_job_type.my_job_name:666|g",
- "test.job.size.not_match:666|g",
- },
- expectedSamples: []MetricSample{
- {Name: "test.job.duration", Tags: []string{"job_type:my_job_type", "job_name:my_job_name"}, Mtype: metrics.GaugeType, Value: 666.0},
- {Name: "test.job.size", Tags: []string{"foo:my_job_type", "bar:my_job_name"}, Mtype: metrics.GaugeType, Value: 666.0},
- {Name: "test.job.size.not_match", Tags: nil, Mtype: metrics.GaugeType, Value: 666.0},
- },
- expectedCacheSize: 1000,
- },
- {
- name: "Tag already present",
- config: `
-dogstatsd_port: __random__
-dogstatsd_mapper_profiles:
- - name: test
- prefix: 'test.'
- mappings:
- - match: "test.job.duration.*.*"
- name: "test.job.duration"
- tags:
- job_type: "$1"
- job_name: "$2"
-`,
- packets: []string{
- "test.job.duration.my_job_type.my_job_name:666|g",
- "test.job.duration.my_job_type.my_job_name:666|g|#some:tag",
- "test.job.duration.my_job_type.my_job_name:666|g|#some:tag,more:tags",
- },
- expectedSamples: []MetricSample{
- {Name: "test.job.duration", Tags: []string{"job_type:my_job_type", "job_name:my_job_name"}, Mtype: metrics.GaugeType, Value: 666.0},
- {Name: "test.job.duration", Tags: []string{"job_type:my_job_type", "job_name:my_job_name", "some:tag"}, Mtype: metrics.GaugeType, Value: 666.0},
- {Name: "test.job.duration", Tags: []string{"job_type:my_job_type", "job_name:my_job_name", "some:tag", "more:tags"}, Mtype: metrics.GaugeType, Value: 666.0},
- },
- expectedCacheSize: 1000,
- },
- {
- name: "Cache size",
- config: `
-dogstatsd_port: __random__
-dogstatsd_mapper_cache_size: 999
-dogstatsd_mapper_profiles:
- - name: test
- prefix: 'test.'
- mappings:
- - match: "test.job.duration.*.*"
- name: "test.job.duration"
- tags:
- job_type: "$1"
- job_name: "$2"
-`,
- packets: []string{},
- expectedSamples: nil,
- expectedCacheSize: 999,
- },
- }
-
- samples := []metrics.MetricSample{}
- for _, scenario := range scenarios {
- t.Run(scenario.name, func(t *testing.T) {
- deps := fulfillDepsWithConfigYaml(t, scenario.config)
-
- s := deps.Server.(*server)
-
- requireStart(t, s)
-
- assert.Equal(t, deps.Config.Get("dogstatsd_mapper_cache_size"), scenario.expectedCacheSize, "Case `%s` failed. cache_size `%s` should be `%s`", scenario.name, deps.Config.Get("dogstatsd_mapper_cache_size"), scenario.expectedCacheSize)
-
- var actualSamples []MetricSample
- for _, p := range scenario.packets {
- parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)
- samples, err := s.parseMetricMessage(samples, parser, []byte(p), "", "", false)
- assert.NoError(t, err, "Case `%s` failed. parseMetricMessage should not return error %v", err)
- for _, sample := range samples {
- actualSamples = append(actualSamples, MetricSample{Name: sample.Name, Tags: sample.Tags, Mtype: sample.Mtype, Value: sample.Value})
- }
- }
- for _, sample := range scenario.expectedSamples {
- sort.Strings(sample.Tags)
- }
- for _, sample := range actualSamples {
- sort.Strings(sample.Tags)
- }
- assert.Equal(t, scenario.expectedSamples, actualSamples, "Case `%s` failed. `%s` should be `%s`", scenario.name, actualSamples, scenario.expectedSamples)
- })
- }
-}
-
-func TestParseEventMessageTelemetry(t *testing.T) {
- cfg := make(map[string]interface{})
-
- cfg["dogstatsd_port"] = listeners.RandomPortName
-
- deps := fxutil.Test[depsWithoutServer](t, fx.Options(
- core.MockBundle(),
- serverdebugimpl.MockModule(),
- fx.Replace(configComponent.MockParams{
- Overrides: cfg,
- }),
- fx.Supply(Params{Serverless: false}),
- replaymock.MockModule(),
- compressionimpl.MockModule(),
- pidmapimpl.Module(),
- demultiplexerimpl.FakeSamplerMockModule(),
- workloadmetafxmock.MockModule(workloadmeta.NewParams()),
- ))
-
- s := newServerCompat(deps.Config, deps.Log, deps.Replay, deps.Debug, false, deps.Demultiplexer, deps.WMeta, deps.PidMap, deps.Telemetry)
-
- parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)
-
- telemetryMock, ok := deps.Telemetry.(telemetry.Mock)
- assert.True(t, ok)
-
- // three successful events
- s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "")
- s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "")
- s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "")
- // one error event
- _, err := s.parseEventMessage(parser, nil, "")
- assert.Error(t, err)
-
- processedEvents, err := telemetryMock.GetCountMetric("dogstatsd", "processed")
- require.NoError(t, err)
-
- for _, metric := range processedEvents {
- labels := metric.Tags()
-
- if labels["message_type"] == "events" && labels["state"] == "ok" {
- assert.Equal(t, float64(3), metric.Value())
- }
-
- if labels["message_type"] == "events" && labels["state"] == "error" {
- assert.Equal(t, float64(1), metric.Value())
- }
- }
-}
-
-func TestParseServiceCheckMessageTelemetry(t *testing.T) {
- cfg := make(map[string]interface{})
-
- cfg["dogstatsd_port"] = listeners.RandomPortName
-
- deps := fxutil.Test[depsWithoutServer](t, fx.Options(
- core.MockBundle(),
- serverdebugimpl.MockModule(),
- fx.Replace(configComponent.MockParams{
- Overrides: cfg,
- }),
- fx.Supply(Params{Serverless: false}),
- replaymock.MockModule(),
- compressionimpl.MockModule(),
- pidmapimpl.Module(),
- demultiplexerimpl.FakeSamplerMockModule(),
- workloadmetafxmock.MockModule(workloadmeta.NewParams()),
- ))
-
- s := newServerCompat(deps.Config, deps.Log, deps.Replay, deps.Debug, false, deps.Demultiplexer, deps.WMeta, deps.PidMap, deps.Telemetry)
-
- parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)
-
- telemetryMock, ok := deps.Telemetry.(telemetry.Mock)
- assert.True(t, ok)
-
- // three successful events
- s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "")
- s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "")
- s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "")
- // one error event
- _, err := s.parseServiceCheckMessage(parser, nil, "")
- assert.Error(t, err)
-
- processedEvents, err := telemetryMock.GetCountMetric("dogstatsd", "processed")
- require.NoError(t, err)
-
- for _, metric := range processedEvents {
- labels := metric.Tags()
-
- if labels["message_type"] == "service_checks" && labels["state"] == "ok" {
- assert.Equal(t, float64(3), metric.Value())
- }
-
- if labels["message_type"] == "service_checks" && labels["state"] == "error" {
- assert.Equal(t, float64(1), metric.Value())
- }
- }
-}
-
func TestNewServerExtraTags(t *testing.T) {
cfg := make(map[string]interface{})
@@ -1032,103 +105,34 @@ func TestNewServerExtraTags(t *testing.T) {
requireStart(t, s)
require.Len(s.extraTags, 0, "no tags should have been read")
- // when the extraTags parameter isn't used, the DogStatsD server is not reading this env var
+ // when not running in fargate, the tags entry is not used
cfg["tags"] = "hello:world"
deps = fulfillDepsWithConfigOverride(t, cfg)
s = deps.Server.(*server)
requireStart(t, s)
require.Len(s.extraTags, 0, "no tags should have been read")
- // when the extraTags parameter isn't used, the DogStatsD server is automatically reading this env var for extra tags
- cfg["dogstatsd_tags"] = "hello:world extra:tags"
+ // dogstatsd_tag is always pulled in to extra tags
+ cfg["dogstatsd_tags"] = "hello:world2 extra:tags"
deps = fulfillDepsWithConfigOverride(t, cfg)
s = deps.Server.(*server)
requireStart(t, s)
+ require.ElementsMatch([]string{"extra:tags", "hello:world2"}, s.extraTags, "two tags should have been read")
require.Len(s.extraTags, 2, "two tags should have been read")
require.Equal(s.extraTags[0], "extra:tags", "the tag extra:tags should be set")
- require.Equal(s.extraTags[1], "hello:world", "the tag hello:world should be set")
-}
+ require.Equal(s.extraTags[1], "hello:world2", "the tag hello:world should be set")
-func TestProcessedMetricsOrigin(t *testing.T) {
- for _, enabled := range []bool{true, false} {
- cfg := make(map[string]interface{})
- cfg["dogstatsd_origin_optout_enabled"] = enabled
- cfg["dogstatsd_port"] = listeners.RandomPortName
-
- deps := fulfillDepsWithConfigOverride(t, cfg)
- s := deps.Server.(*server)
- assert := assert.New(t)
-
- s.Stop()
-
- assert.Len(s.cachedOriginCounters, 0, "this cache must be empty")
- assert.Len(s.cachedOrder, 0, "this cache list must be empty")
-
- parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)
- samples := []metrics.MetricSample{}
- samples, err := s.parseMetricMessage(samples, parser, []byte("test.metric:666|g"), "test_container", "1", false)
- assert.NoError(err)
- assert.Len(samples, 1)
-
- // one thing should have been stored when we parse a metric
- samples, err = s.parseMetricMessage(samples, parser, []byte("test.metric:555|g"), "test_container", "1", true)
- assert.NoError(err)
- assert.Len(samples, 2)
- assert.Len(s.cachedOriginCounters, 1, "one entry should have been cached")
- assert.Len(s.cachedOrder, 1, "one entry should have been cached")
- assert.Equal(s.cachedOrder[0].origin, "test_container")
-
- // when we parse another metric (different value) with same origin, cache should contain only one entry
- samples, err = s.parseMetricMessage(samples, parser, []byte("test.second_metric:525|g"), "test_container", "2", true)
- assert.NoError(err)
- assert.Len(samples, 3)
- assert.Len(s.cachedOriginCounters, 1, "one entry should have been cached")
- assert.Len(s.cachedOrder, 1, "one entry should have been cached")
- assert.Equal(s.cachedOrder[0].origin, "test_container")
- assert.Equal(s.cachedOrder[0].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "test_container"})
- assert.Equal(s.cachedOrder[0].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "test_container"})
-
- // when we parse another metric (different value) but with a different origin, we should store a new entry
- samples, err = s.parseMetricMessage(samples, parser, []byte("test.second_metric:525|g"), "another_container", "3", true)
- assert.NoError(err)
- assert.Len(samples, 4)
- assert.Len(s.cachedOriginCounters, 2, "two entries should have been cached")
- assert.Len(s.cachedOrder, 2, "two entries should have been cached")
- assert.Equal(s.cachedOrder[0].origin, "test_container")
- assert.Equal(s.cachedOrder[0].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "test_container"})
- assert.Equal(s.cachedOrder[0].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "test_container"})
- assert.Equal(s.cachedOrder[1].origin, "another_container")
- assert.Equal(s.cachedOrder[1].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "another_container"})
- assert.Equal(s.cachedOrder[1].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "another_container"})
-
- // oldest one should be removed once we reach the limit of the cache
- maxOriginCounters = 2
- samples, err = s.parseMetricMessage(samples, parser, []byte("yetanothermetric:525|g"), "third_origin", "3", true)
- assert.NoError(err)
- assert.Len(samples, 5)
- assert.Len(s.cachedOriginCounters, 2, "two entries should have been cached, one has been evicted already")
- assert.Len(s.cachedOrder, 2, "two entries should have been cached, one has been evicted already")
- assert.Equal(s.cachedOrder[0].origin, "another_container")
- assert.Equal(s.cachedOrder[0].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "another_container"})
- assert.Equal(s.cachedOrder[0].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "another_container"})
- assert.Equal(s.cachedOrder[1].origin, "third_origin")
- assert.Equal(s.cachedOrder[1].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "third_origin"})
- assert.Equal(s.cachedOrder[1].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "third_origin"})
+ // when running in fargate, "tags" and "dogstatsd_tag" configs are conjoined
+ env.SetFeatures(t, env.EKSFargate)
+ deps = fulfillDepsWithConfigOverride(t, cfg)
+ s = deps.Server.(*server)
+ requireStart(t, s)
- // oldest one should be removed once we reach the limit of the cache
- maxOriginCounters = 2
- samples, err = s.parseMetricMessage(samples, parser, []byte("blablabla:555|g"), "fourth_origin", "4", true)
- assert.NoError(err)
- assert.Len(samples, 6)
- assert.Len(s.cachedOriginCounters, 2, "two entries should have been cached, two have been evicted already")
- assert.Len(s.cachedOrder, 2, "two entries should have been cached, two have been evicted already")
- assert.Equal(s.cachedOrder[0].origin, "third_origin")
- assert.Equal(s.cachedOrder[0].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "third_origin"})
- assert.Equal(s.cachedOrder[0].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "third_origin"})
- assert.Equal(s.cachedOrder[1].origin, "fourth_origin")
- assert.Equal(s.cachedOrder[1].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "fourth_origin"})
- assert.Equal(s.cachedOrder[1].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "fourth_origin"})
- }
+ require.ElementsMatch(
+ []string{"hello:world", "extra:tags", "hello:world2"},
+ s.extraTags,
+ "both tag sources should have been combined",
+ )
}
//nolint:revive // TODO(AML) Fix revive linter
diff --git a/comp/dogstatsd/server/server_uds_test.go b/comp/dogstatsd/server/server_uds_test.go
deleted file mode 100644
index e272136a17920..0000000000000
--- a/comp/dogstatsd/server/server_uds_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016-present Datadog, Inc.
-
-//go:build !windows
-
-// UDS won't work in windows
-
-package server
-
-import (
- "net"
- "path/filepath"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/DataDog/datadog-agent/comp/dogstatsd/listeners"
-)
-
-func TestUDSReceiver(t *testing.T) {
- socketPath := filepath.Join(t.TempDir(), "dsd.socket")
-
- cfg := make(map[string]interface{})
- cfg["dogstatsd_port"] = listeners.RandomPortName
- cfg["dogstatsd_no_aggregation_pipeline"] = true // another test may have turned it off
- cfg["dogstatsd_socket"] = socketPath
-
- deps := fulfillDepsWithConfigOverride(t, cfg)
- demux := deps.Demultiplexer
- require.True(t, deps.Server.UdsListenerRunning())
-
- conn, err := net.Dial("unixgram", socketPath)
- require.NoError(t, err, "cannot connect to DSD socket")
- defer conn.Close()
-
- testReceive(t, conn, demux)
-
- s := deps.Server.(*server)
- s.Stop()
- _, err = net.Dial("unixgram", socketPath)
- require.Error(t, err, "UDS listener should be closed")
-}
-
-func TestUDSReceiverDisabled(t *testing.T) {
- cfg := make(map[string]interface{})
- cfg["dogstatsd_port"] = listeners.RandomPortName
- cfg["dogstatsd_no_aggregation_pipeline"] = true // another test may have turned it off
- cfg["dogstatsd_socket"] = "" // disabled
-
- deps := fulfillDepsWithConfigOverride(t, cfg)
- require.False(t, deps.Server.UdsListenerRunning())
-}
-
-func TestUDSReceiverNoDir(t *testing.T) {
- socketPath := filepath.Join(t.TempDir(), "nonexistent", "dsd.socket") // nonexistent dir, listener should not be set
-
- cfg := make(map[string]interface{})
- cfg["dogstatsd_port"] = listeners.RandomPortName
- cfg["dogstatsd_no_aggregation_pipeline"] = true // another test may have turned it off
- cfg["dogstatsd_socket"] = socketPath
-
- deps := fulfillDepsWithConfigOverride(t, cfg)
- require.False(t, deps.Server.UdsListenerRunning())
-
- _, err := net.Dial("unixgram", socketPath)
- require.Error(t, err, "UDS listener should be closed")
-}
diff --git a/comp/dogstatsd/server/server_util_test.go b/comp/dogstatsd/server/server_util_test.go
new file mode 100644
index 0000000000000..4de92ea776801
--- /dev/null
+++ b/comp/dogstatsd/server/server_util_test.go
@@ -0,0 +1,326 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+
+package server
+
+import (
+ "runtime"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/DataDog/datadog-agent/pkg/metrics/event"
+ "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck"
+ "github.com/DataDog/datadog-agent/pkg/util/testutil/flake"
+
+ "go.uber.org/fx"
+
+ "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer"
+ "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl"
+ "github.com/DataDog/datadog-agent/comp/core"
+ configComponent "github.com/DataDog/datadog-agent/comp/core/config"
+ "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl"
+ log "github.com/DataDog/datadog-agent/comp/core/log/def"
+ logmock "github.com/DataDog/datadog-agent/comp/core/log/mock"
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
+ workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
+ workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
+ "github.com/DataDog/datadog-agent/comp/dogstatsd/packets"
+ "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap"
+ "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap/pidmapimpl"
+ replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def"
+ replaymock "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/fx-mock"
+ serverdebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug"
+ "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug/serverdebugimpl"
+ "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl"
+ "github.com/DataDog/datadog-agent/pkg/metrics"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+ "github.com/DataDog/datadog-agent/pkg/util/optional"
+)
+
+// This is a copy of the serverDeps struct, but without the server field.
+// We need this to avoid starting multiple server with the same test.
+type depsWithoutServer struct {
+ fx.In
+
+ Config configComponent.Component
+ Log log.Component
+ Demultiplexer demultiplexer.FakeSamplerMock
+ Replay replay.Component
+ PidMap pidmap.Component
+ Debug serverdebug.Component
+ WMeta optional.Option[workloadmeta.Component]
+ Telemetry telemetry.Component
+}
+
+type serverDeps struct {
+ fx.In
+
+ Config configComponent.Component
+ Log log.Component
+ Demultiplexer demultiplexer.FakeSamplerMock
+ Replay replay.Component
+ PidMap pidmap.Component
+ Debug serverdebug.Component
+ WMeta optional.Option[workloadmeta.Component]
+ Telemetry telemetry.Component
+ Server Component
+}
+
+func fulfillDeps(t testing.TB) serverDeps {
+ return fulfillDepsWithConfigOverride(t, map[string]interface{}{})
+}
+
+func fulfillDepsWithConfigOverride(t testing.TB, overrides map[string]interface{}) serverDeps {
+ // TODO: https://datadoghq.atlassian.net/browse/AMLII-1948
+ if runtime.GOOS == "darwin" {
+ flake.Mark(t)
+ }
+ return fxutil.Test[serverDeps](t, fx.Options(
+ core.MockBundle(),
+ serverdebugimpl.MockModule(),
+ fx.Replace(configComponent.MockParams{
+ Overrides: overrides,
+ }),
+ replaymock.MockModule(),
+ compressionimpl.MockModule(),
+ pidmapimpl.Module(),
+ demultiplexerimpl.FakeSamplerMockModule(),
+ workloadmetafxmock.MockModule(workloadmeta.NewParams()),
+ Module(Params{Serverless: false}),
+ ))
+}
+
+func fulfillDepsWithConfigYaml(t testing.TB, yaml string) serverDeps {
+ return fxutil.Test[serverDeps](t, fx.Options(
+ fx.Provide(func(t testing.TB) log.Component { return logmock.New(t) }),
+ fx.Provide(func(t testing.TB) configComponent.Component { return configComponent.NewMockFromYAML(t, yaml) }),
+ telemetryimpl.MockModule(),
+ hostnameimpl.MockModule(),
+ serverdebugimpl.MockModule(),
+ replaymock.MockModule(),
+ compressionimpl.MockModule(),
+ pidmapimpl.Module(),
+ demultiplexerimpl.FakeSamplerMockModule(),
+ workloadmetafxmock.MockModule(workloadmeta.NewParams()),
+ Module(Params{Serverless: false}),
+ ))
+}
+
+// Returns a server that is not started along with associated dependencies
+// Be careful when using this functionality, as server start instantiates many internal components to non-nil values
+func fulfillDepsWithInactiveServer(t *testing.T, cfg map[string]interface{}) (depsWithoutServer, *server) {
+ deps := fxutil.Test[depsWithoutServer](t, fx.Options(
+ core.MockBundle(),
+ serverdebugimpl.MockModule(),
+ fx.Replace(configComponent.MockParams{
+ Overrides: cfg,
+ }),
+ fx.Supply(Params{Serverless: false}),
+ replaymock.MockModule(),
+ compressionimpl.MockModule(),
+ pidmapimpl.Module(),
+ demultiplexerimpl.FakeSamplerMockModule(),
+ workloadmetafxmock.MockModule(workloadmeta.NewParams()),
+ ))
+
+ s := newServerCompat(deps.Config, deps.Log, deps.Replay, deps.Debug, false, deps.Demultiplexer, deps.WMeta, deps.PidMap, deps.Telemetry)
+
+ return deps, s
+}
+
+type batcherMock struct {
+ serviceChecks []*servicecheck.ServiceCheck
+ events []*event.Event
+ lateSamples []metrics.MetricSample
+ samples []metrics.MetricSample
+}
+
+func (b *batcherMock) appendServiceCheck(serviceCheck *servicecheck.ServiceCheck) {
+ b.serviceChecks = append(b.serviceChecks, serviceCheck)
+}
+
+func (b *batcherMock) appendEvent(event *event.Event) {
+ b.events = append(b.events, event)
+}
+
+func (b *batcherMock) appendLateSample(sample metrics.MetricSample) {
+ b.lateSamples = append(b.lateSamples, sample)
+}
+
+func (b *batcherMock) appendSample(sample metrics.MetricSample) {
+ b.samples = append(b.samples, sample)
+}
+
+func (b *batcherMock) flush() {}
+
+func (b *batcherMock) clear() {
+ b.serviceChecks = b.serviceChecks[0:0]
+ b.events = b.events[0:0]
+ b.lateSamples = b.lateSamples[0:0]
+ b.samples = b.samples[0:0]
+}
+
+func genTestPackets(inputs ...[]byte) []*packets.Packet {
+ packetSet := make([]*packets.Packet, len(inputs))
+ for idx, input := range inputs {
+ packet := &packets.Packet{
+ Contents: input,
+ Origin: "test-origin",
+ ListenerID: "noop-listener",
+ Source: packets.UDP,
+ }
+ packetSet[idx] = packet
+ }
+
+ return packetSet
+}
+
// defaultMetricInput is a gauge payload whose expected parse result is defaultMetric().
var defaultMetricInput = []byte("daemon:666|g|#sometag1:somevalue1,sometag2:somevalue2")

// defaultMetric returns the expected parse result for defaultMetricInput as a
// mutable expectation; tests tweak individual fields via the with* helpers.
func defaultMetric() *tMetricSample {
	return &tMetricSample{
		Name: "daemon",
		Value: 666.0,
		SampleRate: 1,
		Mtype: metrics.GaugeType,
		Tags: []string{"sometag1:somevalue1", "sometag2:somevalue2"},
	}
}
+
// defaultServiceInput is a service-check payload whose expected parse result is
// defaultServiceCheck(). Note: "somevalyyue1" is deliberate test data and is
// mirrored in the expectation below.
var defaultServiceInput = []byte("_sc|agent.up|0|d:12345|h:localhost|m:this is fine|#sometag1:somevalyyue1,sometag2:somevalue2")

// defaultServiceCheck returns the expected parse result for defaultServiceInput.
func defaultServiceCheck() tServiceCheck {
	return tServiceCheck{
		CheckName: "agent.up",
		Host: "localhost",
		Message: "this is fine",
		Tags: []string{"sometag1:somevalyyue1", "sometag2:somevalue2"},
		Status: 0,
		Ts: 12345,
	}
}
+
// defaultEventInput is an event payload whose expected parse result is
// defaultEvent(). The {10,10} header gives the title and text lengths; "\\n"
// in the wire text is decoded to a real newline by the parser.
var defaultEventInput = []byte("_e{10,10}:test title|test\\ntext|t:warning|d:12345|p:low|h:some.host|k:aggKey|s:source test|#tag1,tag2:test")

// defaultEvent returns the expected parse result for defaultEventInput.
func defaultEvent() tEvent {
	return tEvent{
		Title: "test title",
		Text: "test\ntext",
		Tags: []string{"tag1", "tag2:test"},
		Host: "some.host",
		Ts: 12345,
		AlertType: event.AlertTypeWarning,

		Priority: event.PriorityLow,
		AggregationKey: "aggKey",
		SourceTypeName: "source test",
	}
}
+
// tMetricSample is a test expectation for a parsed metrics.MetricSample; it is
// compared field by field in testMetric.
type tMetricSample struct {
	Name string
	Value float64
	Tags []string
	Mtype metrics.MetricType
	SampleRate float64
	RawValue string
	Timestamp float64
}
+
// testMetric asserts that every field of the actual sample matches the
// expectation. The shared format string is filled with each field name so a
// failure identifies which field diverged.
func (m tMetricSample) testMetric(t *testing.T, actual metrics.MetricSample) {
	s := "metric %s was expected to match"
	assert.Equal(t, m.Name, actual.Name, s, "name")
	assert.Equal(t, m.Value, actual.Value, s, "value")
	assert.Equal(t, m.Mtype, actual.Mtype, s, "type")
	// Tags may arrive in any order.
	assert.ElementsMatch(t, m.Tags, actual.Tags, s, "tags")
	assert.Equal(t, m.SampleRate, actual.SampleRate, s, "sample rate")
	assert.Equal(t, m.RawValue, actual.RawValue, s, "raw value")
	assert.Equal(t, m.Timestamp, actual.Timestamp, s, "timestamp")
}
+
// withName overrides the expected name; returns the receiver for chaining.
func (m *tMetricSample) withName(n string) *tMetricSample {
	m.Name = n
	return m
}
+
// withValue overrides the expected value; returns the receiver for chaining.
func (m *tMetricSample) withValue(v float64) *tMetricSample {
	m.Value = v
	return m
}
+
// withType overrides the expected metric type; returns the receiver for chaining.
func (m *tMetricSample) withType(t metrics.MetricType) *tMetricSample {
	m.Mtype = t
	return m
}
+
// withTags replaces the expected tag set; returns the receiver for chaining.
func (m *tMetricSample) withTags(tags []string) *tMetricSample {
	m.Tags = tags
	return m
}
+
// withSampleRate overrides the expected sample rate; returns the receiver for chaining.
func (m *tMetricSample) withSampleRate(srate float64) *tMetricSample {
	m.SampleRate = srate
	return m
}
+
// withRawValue overrides the expected raw (string) value; returns the receiver for chaining.
func (m *tMetricSample) withRawValue(rval string) *tMetricSample {
	m.RawValue = rval
	return m
}
+
// withTimestamp overrides the expected explicit timestamp; returns the receiver for chaining.
func (m *tMetricSample) withTimestamp(timestamp float64) *tMetricSample {
	m.Timestamp = timestamp
	return m
}
+
// tServiceCheck is a test expectation for a parsed servicecheck.ServiceCheck;
// it is compared field by field in testService.
type tServiceCheck struct {
	CheckName string
	Host string
	Message string
	Ts int64
	Tags []string
	Status servicecheck.ServiceCheckStatus
}
+
// testService asserts that every field of the actual service check matches the
// expectation; the format string names the field that diverged on failure.
func (expected tServiceCheck) testService(t *testing.T, actual *servicecheck.ServiceCheck) {
	s := "service check %s was expected to match"
	assert.Equal(t, expected.CheckName, actual.CheckName, s, "check name")
	assert.Equal(t, expected.Host, actual.Host, s, "host")
	assert.Equal(t, expected.Message, actual.Message, s, "message")
	assert.Equal(t, expected.Ts, actual.Ts, s, "timestamp")
	// Tags may arrive in any order.
	assert.ElementsMatch(t, expected.Tags, actual.Tags, s, "tags")
	assert.Equal(t, expected.Status, actual.Status, s, "status")
}
+
// tEvent is a test expectation for a parsed event.Event; it is compared field
// by field in testEvent.
type tEvent struct {
	Title string
	Text string
	Tags []string
	Host string
	Ts int64
	AlertType event.AlertType
	EventType string
	Priority event.Priority
	AggregationKey string
	SourceTypeName string
}
+
// testEvent asserts that every field of the actual event matches the
// expectation; the format string names the field that diverged on failure.
func (expected tEvent) testEvent(t *testing.T, actual *event.Event) {
	s := "event %s was expected to match"
	assert.Equal(t, expected.Title, actual.Title, s, "title")
	assert.Equal(t, expected.Text, actual.Text, s, "text")
	// Tags may arrive in any order.
	assert.ElementsMatch(t, expected.Tags, actual.Tags, s, "tags")
	assert.Equal(t, expected.Host, actual.Host, s, "host")
	assert.Equal(t, expected.Ts, actual.Ts, s, "timestamp")
	assert.Equal(t, expected.AlertType, actual.AlertType, s, "alert type")
	assert.Equal(t, expected.EventType, actual.EventType, s, "type")
	assert.Equal(t, expected.Priority, actual.Priority, s, "priority")
	assert.Equal(t, expected.AggregationKey, actual.AggregationKey, s, "aggregation key")
	assert.Equal(t, expected.SourceTypeName, actual.SourceTypeName, s, "source type name")
}
diff --git a/comp/dogstatsd/server/server_worker_test.go b/comp/dogstatsd/server/server_worker_test.go
new file mode 100644
index 0000000000000..5dbe409c9d863
--- /dev/null
+++ b/comp/dogstatsd/server/server_worker_test.go
@@ -0,0 +1,579 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+//go:build test
+
+package server
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/DataDog/datadog-agent/comp/dogstatsd/listeners"
+ "github.com/DataDog/datadog-agent/pkg/metrics"
+ "github.com/DataDog/datadog-agent/pkg/metrics/event"
+)
+
+// Run through all of the major metric types and verify both the default and the timestamped flows
+func TestMetricTypes(t *testing.T) {
+ cfg := make(map[string]interface{})
+ cfg["dogstatsd_port"] = listeners.RandomPortName
+ deps := fulfillDepsWithConfigOverride(t, cfg)
+
+ scenarios := []struct {
+ name string
+ input []byte
+ test *tMetricSample
+ }{
+ {
+ name: "Test Gauge",
+ input: []byte("daemon:666|g|@0.5|#sometag1:somevalue1,sometag2:somevalue2"),
+ test: defaultMetric().withType(metrics.GaugeType).withSampleRate(0.5),
+ },
+ {
+ name: "Test Counter",
+ input: []byte("daemon:666|c|@0.5|#sometag1:somevalue1,sometag2:somevalue2"),
+ test: defaultMetric().withType(metrics.CounterType).withSampleRate(0.5),
+ },
+ {
+ name: "Test Histogram",
+ input: []byte("daemon:666|h|@0.5|#sometag1:somevalue1,sometag2:somevalue2"),
+ test: defaultMetric().withType(metrics.HistogramType).withSampleRate(0.5),
+ },
+ {
+ name: "Test Timing",
+ input: []byte("daemon:666|ms|@0.5|#sometag1:somevalue1,sometag2:somevalue2"),
+ test: defaultMetric().withType(metrics.HistogramType).withSampleRate(0.5)},
+ {
+ name: "Test Set",
+ input: []byte("daemon:abc|s|@0.5|#sometag1:somevalue1,sometag2:somevalue2"),
+ test: defaultMetric().withType(metrics.SetType).withSampleRate(0.5).withValue(0).withRawValue("abc"),
+ },
+ }
+
+ for _, s := range scenarios {
+ t.Run(s.name, func(t *testing.T) {
+ runTestMetrics(t, deps, s.input, []*tMetricSample{s.test}, []*tMetricSample{})
+
+ timedInput := append(s.input, []byte("|T1658328888\n")...)
+ s.test.withTimestamp(1658328888)
+ runTestMetrics(t, deps, timedInput, []*tMetricSample{}, []*tMetricSample{s.test})
+ })
+ }
+}
+
// TestMetricPermutations exercises multi-metric packets, multi-value metrics,
// empty-value skipping, and malformed lines; malformed or empty metrics must be
// dropped without affecting the valid metrics in the same packet.
func TestMetricPermutations(t *testing.T) {
	cfg := make(map[string]interface{})
	cfg["dogstatsd_port"] = listeners.RandomPortName
	deps := fulfillDepsWithConfigOverride(t, cfg)

	// Shared expectations reused (read-only) across the scenarios below.
	packet1Test := defaultMetric().withTags(nil).withType(metrics.CounterType)
	packet1AltTest := defaultMetric().withValue(123.0).withTags(nil).withType(metrics.CounterType)
	packet2Test := defaultMetric().withName("daemon2").withValue(1000.0).withType(metrics.CounterType)

	scenarios := []struct {
		name  string
		input []byte
		tests []*tMetricSample
	}{
		{
			name:  "Base multi-metric packet",
			input: []byte("daemon:666|c\ndaemon2:1000|c|#sometag1:somevalue1,sometag2:somevalue2"),
			tests: []*tMetricSample{packet1Test, packet2Test},
		},
		{
			name:  "Multi-value packet",
			input: []byte("daemon:666:123|c\ndaemon2:1000|c|#sometag1:somevalue1,sometag2:somevalue2"),
			tests: []*tMetricSample{packet1Test, packet1AltTest, packet2Test},
		},
		{
			name:  "Multi-value packet with skip empty",
			input: []byte("daemon::666::123::::|c\ndaemon2:1000|c|#sometag1:somevalue1,sometag2:somevalue2"),
			tests: []*tMetricSample{packet1Test, packet1AltTest, packet2Test},
		},
		{
			// Empty line in the middle and trailing newline are ignored.
			name:  "Malformed packet",
			input: []byte("daemon:666|c\n\ndaemon2:1000|c|#sometag1:somevalue1,sometag2:somevalue2\n"),
			tests: []*tMetricSample{packet1Test, packet2Test},
		},
		{
			// "666a" is not a valid gauge value; only the second metric survives.
			name:  "Malformed metric",
			input: []byte("daemon:666a|g\ndaemon2:1000|c|#sometag1:somevalue1,sometag2:somevalue2"),
			tests: []*tMetricSample{packet2Test},
		},
		{
			// Metrics with empty or whitespace values are dropped.
			name:  "Empty metric",
			input: []byte("daemon:|g\ndaemon2:1000|c|#sometag1:somevalue1,sometag2:somevalue2\ndaemon3: :1:|g"),
			tests: []*tMetricSample{packet2Test},
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			runTestMetrics(t, deps, s.input, s.tests, []*tMetricSample{})
		})
	}
}
+
+func runTestMetrics(t *testing.T, deps serverDeps, input []byte, expTests []*tMetricSample, expTimeTests []*tMetricSample) {
+ s := deps.Server.(*server)
+
+ var b batcherMock
+ parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)
+ s.parsePackets(&b, parser, genTestPackets(input), metrics.MetricSampleBatch{})
+
+ samples := b.samples
+ timedSamples := b.lateSamples
+
+ assert.Equal(t, len(expTests), len(samples))
+ assert.Equal(t, len(expTimeTests), len(timedSamples))
+
+ for idx, samp := range samples {
+ expTests[idx].testMetric(t, samp)
+ }
+ for idx, tSamp := range timedSamples {
+ expTimeTests[idx].testMetric(t, tSamp)
+ }
+}
+
// TestEvents verifies that well-formed events in a packet are parsed into the
// batcher in order, and that malformed events (bad length headers) are dropped
// without affecting valid events in the same packet.
func TestEvents(t *testing.T) {
	cfg := make(map[string]interface{})
	cfg["dogstatsd_port"] = listeners.RandomPortName

	deps := fulfillDepsWithConfigOverride(t, cfg)
	s := deps.Server.(*server)
	parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)
	var b batcherMock

	input1 := defaultEventInput
	test1 := defaultEvent()
	input2 := []byte("_e{11,15}:test titled|test\\ntextntext|t:info|d:12346|p:normal|h:some.otherhost|k:aggKeyAlt|s:source investigation|#tag1,tag2:test,tag3:resolved")
	test2 := tEvent{
		Title: "test titled",
		Text: "test\ntextntext",
		Tags: []string{"tag1", "tag2:test", "tag3:resolved"},
		Host: "some.otherhost",
		Ts: 12346,
		AlertType: event.AlertTypeInfo,
		Priority: event.PriorityNormal,
		AggregationKey: "aggKeyAlt",
		SourceTypeName: "source investigation",
	}

	s.parsePackets(&b, parser, genTestPackets(input1, input2), metrics.MetricSampleBatch{})

	assert.Equal(t, 2, len(b.events))

	test1.testEvent(t, b.events[0])
	test2.testEvent(t, b.events[1])

	b.clear()
	// Test incomplete Events: a zero-length title and a negative length header
	// are both rejected; only the valid middle event survives.
	input := []byte("_e{0,9}:|test text\n" +
		string(defaultEventInput) + "\n" +
		"_e{-5,2}:abc\n",
	)

	s.parsePackets(&b, parser, genTestPackets(input), metrics.MetricSampleBatch{})
	assert.Equal(t, 1, len(b.events))
	defaultEvent().testEvent(t, b.events[0])
}
+
// TestServiceChecks verifies that a well-formed service check is parsed into
// the batcher and that an incomplete one (missing status) is dropped without
// affecting the valid check in the same packet.
func TestServiceChecks(t *testing.T) {
	cfg := make(map[string]interface{})
	cfg["dogstatsd_port"] = listeners.RandomPortName

	deps := fulfillDepsWithConfigOverride(t, cfg)
	s := deps.Server.(*server)
	parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)
	var b batcherMock

	s.parsePackets(&b, parser, genTestPackets(defaultServiceInput), metrics.MetricSampleBatch{})

	assert.Equal(t, 1, len(b.serviceChecks))
	defaultServiceCheck().testService(t, b.serviceChecks[0])

	b.clear()

	// Test incomplete Service Check ("_sc|agen.down" has no status field).
	input := append([]byte("_sc|agen.down\n"), defaultServiceInput...)
	s.parsePackets(&b, parser, genTestPackets(input), metrics.MetricSampleBatch{})

	assert.Equal(t, 1, len(b.serviceChecks))
	defaultServiceCheck().testService(t, b.serviceChecks[0])
}
+
+func TestHistToDist(t *testing.T) {
+ cfg := make(map[string]interface{})
+ cfg["dogstatsd_port"] = listeners.RandomPortName
+ cfg["histogram_copy_to_distribution"] = true
+ cfg["histogram_copy_to_distribution_prefix"] = "dist."
+ deps := fulfillDepsWithConfigOverride(t, cfg)
+
+ // Test metric
+ input := []byte("daemon:666|h|#sometag1:somevalue1,sometag2:somevalue2")
+
+ test1 := defaultMetric().withType(metrics.HistogramType)
+ test2 := defaultMetric().withName("dist.daemon").withType(metrics.DistributionType)
+
+ runTestMetrics(t, deps, input, []*tMetricSample{test1, test2}, []*tMetricSample{})
+}
+
+func TestExtraTags(t *testing.T) {
+ cfg := make(map[string]interface{})
+ cfg["dogstatsd_port"] = listeners.RandomPortName
+
+ deps := fulfillDepsWithConfigOverride(t, cfg)
+ deps.Server.SetExtraTags([]string{"sometag3:somevalue3"})
+
+ test := defaultMetric().withTags([]string{"sometag1:somevalue1", "sometag2:somevalue2", "sometag3:somevalue3"})
+ // Test single metric
+ runTestMetrics(t, deps, defaultMetricInput, []*tMetricSample{test}, []*tMetricSample{})
+
+ // Test multivalue metric
+ test2 := defaultMetric().withValue(500.0).withTags([]string{"sometag1:somevalue1", "sometag2:somevalue2", "sometag3:somevalue3"})
+ input := []byte("daemon:666:500|g|#sometag1:somevalue1,sometag2:somevalue2")
+ runTestMetrics(t, deps, input, []*tMetricSample{test, test2}, []*tMetricSample{})
+}
+
// TestParseMetricMessageTelemetry checks that the processed-ok and
// processed-error telemetry counters are incremented when parsing a valid and
// an invalid metric message, respectively.
func TestParseMetricMessageTelemetry(t *testing.T) {
	cfg := make(map[string]interface{})

	cfg["dogstatsd_port"] = listeners.RandomPortName

	deps, s := fulfillDepsWithInactiveServer(t, cfg)

	// No mapper profiles configured, so the server has no mapper.
	assert.Nil(t, s.mapper)

	var samples []metrics.MetricSample

	parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)

	// A valid message yields one sample and bumps the ok counter.
	assert.Equal(t, float64(0), s.tlmProcessedOk.Get())
	samples, err := s.parseMetricMessage(samples, parser, []byte("test.metric:666|g"), "", "", false)
	assert.NoError(t, err)
	assert.Len(t, samples, 1)
	assert.Equal(t, float64(1), s.tlmProcessedOk.Get())

	// A nil message errors, yields no new sample, and bumps the error counter.
	assert.Equal(t, float64(0), s.tlmProcessedError.Get())
	samples, err = s.parseMetricMessage(samples, parser, nil, "", "", false)
	assert.Error(t, err, "invalid dogstatsd message format")
	assert.Len(t, samples, 1)
	assert.Equal(t, float64(1), s.tlmProcessedError.Get())
}
+
+func TestMappingCases(t *testing.T) {
+ scenarios := []struct {
+ name string
+ config string
+ packets [][]byte
+ expectedSamples []*tMetricSample
+ expectedCacheSize int
+ }{
+ {
+ name: "Simple OK case",
+ config: `
+dogstatsd_port: __random__
+dogstatsd_mapper_profiles:
+ - name: test
+ prefix: 'test.'
+ mappings:
+ - match: "test.job.duration.*.*"
+ name: "test.job.duration"
+ tags:
+ job_type: "$1"
+ job_name: "$2"
+ - match: "test.job.size.*.*"
+ name: "test.job.size"
+ tags:
+ foo: "$1"
+ bar: "$2"
+`,
+ packets: [][]byte{
+ []byte("test.job.duration.my_job_type.my_job_name:666|g"),
+ []byte("test.job.size.my_job_type.my_job_name:666|g"),
+ []byte("test.job.size.not_match:666|g"),
+ },
+ expectedSamples: []*tMetricSample{
+ defaultMetric().withName("test.job.duration").withTags([]string{"job_type:my_job_type", "job_name:my_job_name"}),
+ defaultMetric().withName("test.job.size").withTags([]string{"foo:my_job_type", "bar:my_job_name"}),
+ defaultMetric().withName("test.job.size.not_match").withTags(nil),
+ },
+ expectedCacheSize: 1000,
+ },
+ {
+ name: "Tag already present",
+ config: `
+dogstatsd_port: __random__
+dogstatsd_mapper_profiles:
+ - name: test
+ prefix: 'test.'
+ mappings:
+ - match: "test.job.duration.*.*"
+ name: "test.job.duration"
+ tags:
+ job_type: "$1"
+ job_name: "$2"
+`,
+ packets: [][]byte{
+ []byte("test.job.duration.my_job_type.my_job_name:666|g"),
+ []byte("test.job.duration.my_job_type.my_job_name:666|g|#some:tag"),
+ []byte("test.job.duration.my_job_type.my_job_name:666|g|#some:tag,more:tags"),
+ },
+ expectedSamples: []*tMetricSample{
+ defaultMetric().withName("test.job.duration").withTags([]string{"job_type:my_job_type", "job_name:my_job_name"}),
+ defaultMetric().withName("test.job.duration").withTags([]string{"job_type:my_job_type", "job_name:my_job_name", "some:tag"}),
+ defaultMetric().withName("test.job.duration").withTags([]string{"job_type:my_job_type", "job_name:my_job_name", "some:tag", "more:tags"}),
+ },
+ expectedCacheSize: 1000,
+ },
+ {
+ name: "Cache size",
+ config: `
+dogstatsd_port: __random__
+dogstatsd_mapper_cache_size: 999
+dogstatsd_mapper_profiles:
+ - name: test
+ prefix: 'test.'
+ mappings:
+ - match: "test.job.duration.*.*"
+ name: "test.job.duration"
+ tags:
+ job_type: "$1"
+ job_name: "$2"
+`,
+ packets: [][]byte{},
+ expectedSamples: nil,
+ expectedCacheSize: 999,
+ },
+ }
+
+ for _, scenario := range scenarios {
+ t.Run(scenario.name, func(t *testing.T) {
+ deps := fulfillDepsWithConfigYaml(t, scenario.config)
+
+ s := deps.Server.(*server)
+
+ requireStart(t, s)
+
+ assert.Equal(t, deps.Config.Get("dogstatsd_mapper_cache_size"), scenario.expectedCacheSize, "Case `%s` failed. cache_size `%s` should be `%s`", scenario.name, deps.Config.Get("dogstatsd_mapper_cache_size"), scenario.expectedCacheSize)
+
+ parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)
+ var b batcherMock
+ s.parsePackets(&b, parser, genTestPackets(scenario.packets...), metrics.MetricSampleBatch{})
+
+ for idx, sample := range b.samples {
+ scenario.expectedSamples[idx].testMetric(t, sample)
+ }
+ })
+ }
+}
+
// TestParseEventMessageTelemetry checks that the dogstatsd "processed" count
// metric tracks events in both the ok and error states.
func TestParseEventMessageTelemetry(t *testing.T) {
	cfg := make(map[string]interface{})

	cfg["dogstatsd_port"] = listeners.RandomPortName

	deps, s := fulfillDepsWithInactiveServer(t, cfg)

	parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)

	telemetryMock, ok := deps.Telemetry.(telemetry.Mock)
	assert.True(t, ok)

	// three successful events (return values intentionally ignored; only the
	// telemetry counters are under test)
	s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "")
	s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "")
	s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "")
	// one error event
	_, err := s.parseEventMessage(parser, nil, "")
	assert.Error(t, err)

	processedEvents, err := telemetryMock.GetCountMetric("dogstatsd", "processed")
	require.NoError(t, err)

	// The counter is shared across message types; filter by labels.
	for _, metric := range processedEvents {
		labels := metric.Tags()

		if labels["message_type"] == "events" && labels["state"] == "ok" {
			assert.Equal(t, float64(3), metric.Value())
		}

		if labels["message_type"] == "events" && labels["state"] == "error" {
			assert.Equal(t, float64(1), metric.Value())
		}
	}
}
+
// TestParseServiceCheckMessageTelemetry checks that the dogstatsd "processed"
// count metric tracks service checks in both the ok and error states.
func TestParseServiceCheckMessageTelemetry(t *testing.T) {
	cfg := make(map[string]interface{})

	cfg["dogstatsd_port"] = listeners.RandomPortName

	deps, s := fulfillDepsWithInactiveServer(t, cfg)

	parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)

	telemetryMock, ok := deps.Telemetry.(telemetry.Mock)
	assert.True(t, ok)

	// three successful service checks (return values intentionally ignored;
	// only the telemetry counters are under test)
	s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "")
	s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "")
	s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "")
	// one error service check
	_, err := s.parseServiceCheckMessage(parser, nil, "")
	assert.Error(t, err)

	processedEvents, err := telemetryMock.GetCountMetric("dogstatsd", "processed")
	require.NoError(t, err)

	// The counter is shared across message types; filter by labels.
	for _, metric := range processedEvents {
		labels := metric.Tags()

		if labels["message_type"] == "service_checks" && labels["state"] == "ok" {
			assert.Equal(t, float64(3), metric.Value())
		}

		if labels["message_type"] == "service_checks" && labels["state"] == "error" {
			assert.Equal(t, float64(1), metric.Value())
		}
	}
}
+
+func TestProcessedMetricsOrigin(t *testing.T) {
+ for _, enabled := range []bool{true, false} {
+ cfg := make(map[string]interface{})
+ cfg["dogstatsd_origin_optout_enabled"] = enabled
+ cfg["dogstatsd_port"] = listeners.RandomPortName
+
+ deps := fulfillDepsWithConfigOverride(t, cfg)
+ s := deps.Server.(*server)
+ assert := assert.New(t)
+
+ s.Stop()
+
+ assert.Len(s.cachedOriginCounters, 0, "this cache must be empty")
+ assert.Len(s.cachedOrder, 0, "this cache list must be empty")
+
+ parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry)
+ samples := []metrics.MetricSample{}
+ samples, err := s.parseMetricMessage(samples, parser, []byte("test.metric:666|g"), "test_container", "1", false)
+ assert.NoError(err)
+ assert.Len(samples, 1)
+
+ // one thing should have been stored when we parse a metric
+ samples, err = s.parseMetricMessage(samples, parser, []byte("test.metric:555|g"), "test_container", "1", true)
+ assert.NoError(err)
+ assert.Len(samples, 2)
+ assert.Len(s.cachedOriginCounters, 1, "one entry should have been cached")
+ assert.Len(s.cachedOrder, 1, "one entry should have been cached")
+ assert.Equal(s.cachedOrder[0].origin, "test_container")
+
+ // when we parse another metric (different value) with same origin, cache should contain only one entry
+ samples, err = s.parseMetricMessage(samples, parser, []byte("test.second_metric:525|g"), "test_container", "2", true)
+ assert.NoError(err)
+ assert.Len(samples, 3)
+ assert.Len(s.cachedOriginCounters, 1, "one entry should have been cached")
+ assert.Len(s.cachedOrder, 1, "one entry should have been cached")
+ assert.Equal(s.cachedOrder[0].origin, "test_container")
+ assert.Equal(s.cachedOrder[0].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "test_container"})
+ assert.Equal(s.cachedOrder[0].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "test_container"})
+
+ // when we parse another metric (different value) but with a different origin, we should store a new entry
+ samples, err = s.parseMetricMessage(samples, parser, []byte("test.second_metric:525|g"), "another_container", "3", true)
+ assert.NoError(err)
+ assert.Len(samples, 4)
+ assert.Len(s.cachedOriginCounters, 2, "two entries should have been cached")
+ assert.Len(s.cachedOrder, 2, "two entries should have been cached")
+ assert.Equal(s.cachedOrder[0].origin, "test_container")
+ assert.Equal(s.cachedOrder[0].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "test_container"})
+ assert.Equal(s.cachedOrder[0].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "test_container"})
+ assert.Equal(s.cachedOrder[1].origin, "another_container")
+ assert.Equal(s.cachedOrder[1].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "another_container"})
+ assert.Equal(s.cachedOrder[1].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "another_container"})
+
+ // oldest one should be removed once we reach the limit of the cache
+ maxOriginCounters = 2
+ samples, err = s.parseMetricMessage(samples, parser, []byte("yetanothermetric:525|g"), "third_origin", "3", true)
+ assert.NoError(err)
+ assert.Len(samples, 5)
+ assert.Len(s.cachedOriginCounters, 2, "two entries should have been cached, one has been evicted already")
+ assert.Len(s.cachedOrder, 2, "two entries should have been cached, one has been evicted already")
+ assert.Equal(s.cachedOrder[0].origin, "another_container")
+ assert.Equal(s.cachedOrder[0].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "another_container"})
+ assert.Equal(s.cachedOrder[0].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "another_container"})
+ assert.Equal(s.cachedOrder[1].origin, "third_origin")
+ assert.Equal(s.cachedOrder[1].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "third_origin"})
+ assert.Equal(s.cachedOrder[1].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "third_origin"})
+
+ // oldest one should be removed once we reach the limit of the cache
+ maxOriginCounters = 2
+ samples, err = s.parseMetricMessage(samples, parser, []byte("blablabla:555|g"), "fourth_origin", "4", true)
+ assert.NoError(err)
+ assert.Len(samples, 6)
+ assert.Len(s.cachedOriginCounters, 2, "two entries should have been cached, two have been evicted already")
+ assert.Len(s.cachedOrder, 2, "two entries should have been cached, two have been evicted already")
+ assert.Equal(s.cachedOrder[0].origin, "third_origin")
+ assert.Equal(s.cachedOrder[0].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "third_origin"})
+ assert.Equal(s.cachedOrder[0].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "third_origin"})
+ assert.Equal(s.cachedOrder[1].origin, "fourth_origin")
+ assert.Equal(s.cachedOrder[1].ok, map[string]string{"message_type": "metrics", "state": "ok", "origin": "fourth_origin"})
+ assert.Equal(s.cachedOrder[1].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "fourth_origin"})
+ }
+}
+
+func TestNextMessage(t *testing.T) {
+ scenarios := []struct {
+ name string
+ messages []string
+ eolTermination bool
+ expectedTlm int64
+ expectedMetritCnt int
+ }{
+ {
+ name: "No eol newline, eol enabled",
+ messages: []string{"foo\n", "bar\r\n", "baz\r\n", "quz\n", "hax\r"},
+ eolTermination: true,
+ expectedTlm: 1,
+ expectedMetritCnt: 4, // final message won't be processed, no newline
+ },
+ {
+ name: "No eol newline, eol disabled",
+ messages: []string{"foo\n", "bar\r\n", "baz\r\n", "quz\n", "hax"},
+ eolTermination: false,
+ expectedTlm: 0,
+ expectedMetritCnt: 5,
+ },
+ {
+ name: "Base Case",
+ messages: []string{"foo\n", "bar\r\n", "baz\r\n", "quz\n", "hax\r\n"},
+ eolTermination: true,
+ expectedTlm: 0,
+ expectedMetritCnt: 5,
+ },
+ }
+
+ for _, s := range scenarios {
+ t.Run(s.name, func(t *testing.T) {
+ packet := []byte(strings.Join(s.messages, ""))
+ initialTelem := dogstatsdUnterminatedMetricErrors.Value()
+ res := nextMessage(&packet, s.eolTermination)
+ cnt := 0
+ for res != nil {
+ // Confirm newline/carriage return were not transferred
+ assert.Equal(t, string(res), strings.TrimRight(s.messages[cnt], "\n\r"))
+ res = nextMessage(&packet, s.eolTermination)
+ cnt++
+ }
+
+ assert.Equal(t, s.expectedTlm, dogstatsdUnterminatedMetricErrors.Value()-initialTelem)
+ assert.Equal(t, s.expectedMetritCnt, cnt)
+ })
+ }
+}
diff --git a/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go b/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go
index 7e0f51f509689..9c4fb0d451ed5 100644
--- a/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go
+++ b/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go
@@ -9,6 +9,7 @@ package eventplatformimpl
import (
"context"
"fmt"
+ "strconv"
"strings"
"sync"
@@ -27,6 +28,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/logs/client"
logshttp "github.com/DataDog/datadog-agent/pkg/logs/client/http"
"github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
"github.com/DataDog/datadog-agent/pkg/logs/sender"
"github.com/DataDog/datadog-agent/pkg/util/fxutil"
"github.com/DataDog/datadog-agent/pkg/util/log"
@@ -393,15 +395,18 @@ func newHTTPPassthroughPipeline(coreConfig model.Reader, eventPlatformReceiver e
if endpoints.InputChanSize <= pkgconfigsetup.DefaultInputChanSize {
endpoints.InputChanSize = desc.defaultInputChanSize
}
+
+ pipelineMonitor := metrics.NewNoopPipelineMonitor(strconv.Itoa(pipelineID))
+
reliable := []client.Destination{}
for i, endpoint := range endpoints.GetReliableEndpoints() {
- telemetryName := fmt.Sprintf("%s_%d_reliable_%d", desc.eventType, pipelineID, i)
- reliable = append(reliable, logshttp.NewDestination(endpoint, desc.contentType, destinationsContext, endpoints.BatchMaxConcurrentSend, true, telemetryName, pkgconfigsetup.Datadog()))
+ destMeta := client.NewDestinationMetadata(desc.eventType, pipelineMonitor.ID(), "reliable", strconv.Itoa(i))
+ reliable = append(reliable, logshttp.NewDestination(endpoint, desc.contentType, destinationsContext, endpoints.BatchMaxConcurrentSend, true, destMeta, pkgconfigsetup.Datadog(), pipelineMonitor))
}
additionals := []client.Destination{}
for i, endpoint := range endpoints.GetUnReliableEndpoints() {
- telemetryName := fmt.Sprintf("%s_%d_unreliable_%d", desc.eventType, pipelineID, i)
- additionals = append(additionals, logshttp.NewDestination(endpoint, desc.contentType, destinationsContext, endpoints.BatchMaxConcurrentSend, false, telemetryName, pkgconfigsetup.Datadog()))
+ destMeta := client.NewDestinationMetadata(desc.eventType, pipelineMonitor.ID(), "unreliable", strconv.Itoa(i))
+ additionals = append(additionals, logshttp.NewDestination(endpoint, desc.contentType, destinationsContext, endpoints.BatchMaxConcurrentSend, false, destMeta, pkgconfigsetup.Datadog(), pipelineMonitor))
}
destinations := client.NewDestinations(reliable, additionals)
inputChan := make(chan *message.Message, endpoints.InputChanSize)
@@ -426,14 +431,15 @@ func newHTTPPassthroughPipeline(coreConfig model.Reader, eventPlatformReceiver e
endpoints.BatchMaxSize,
endpoints.BatchMaxContentSize,
desc.eventType,
- encoder)
+ encoder,
+ pipelineMonitor)
}
a := auditor.NewNullAuditor()
log.Debugf("Initialized event platform forwarder pipeline. eventType=%s mainHosts=%s additionalHosts=%s batch_max_concurrent_send=%d batch_max_content_size=%d batch_max_size=%d, input_chan_size=%d",
desc.eventType, joinHosts(endpoints.GetReliableEndpoints()), joinHosts(endpoints.GetUnReliableEndpoints()), endpoints.BatchMaxConcurrentSend, endpoints.BatchMaxContentSize, endpoints.BatchMaxSize, endpoints.InputChanSize)
return &passthroughPipeline{
- sender: sender.NewSender(coreConfig, senderInput, a.Channel(), destinations, 10, nil, nil),
+ sender: sender.NewSender(coreConfig, senderInput, a.Channel(), destinations, 10, nil, nil, pipelineMonitor),
strategy: strategy,
in: inputChan,
auditor: a,
diff --git a/comp/logs/agent/agentimpl/agent.go b/comp/logs/agent/agentimpl/agent.go
index 0849d55b2b7ef..3a7ada8e08a23 100644
--- a/comp/logs/agent/agentimpl/agent.go
+++ b/comp/logs/agent/agentimpl/agent.go
@@ -22,7 +22,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core/hostname"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
statusComponent "github.com/DataDog/datadog-agent/comp/core/status"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/comp/logs/agent"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
diff --git a/comp/logs/agent/agentimpl/agent_test.go b/comp/logs/agent/agentimpl/agent_test.go
index d419f6aa52399..b171e0c087731 100644
--- a/comp/logs/agent/agentimpl/agent_test.go
+++ b/comp/logs/agent/agentimpl/agent_test.go
@@ -26,8 +26,8 @@ import (
"github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
logmock "github.com/DataDog/datadog-agent/comp/core/log/mock"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
@@ -95,7 +95,7 @@ func (suite *AgentTestSuite) SetupTest() {
// Shorter grace period for tests.
suite.configOverrides["logs_config.stop_grace_period"] = 1
- fakeTagger := taggerimpl.SetupFakeTagger(suite.T())
+ fakeTagger := taggerMock.SetupFakeTagger(suite.T())
suite.tagger = fakeTagger
}
@@ -125,7 +125,7 @@ func createAgent(suite *AgentTestSuite, endpoints *config.Endpoints) (*logAgent,
inventoryagentimpl.MockModule(),
))
- fakeTagger := taggerimpl.SetupFakeTagger(suite.T())
+ fakeTagger := taggerMock.SetupFakeTagger(suite.T())
agent := &logAgent{
log: deps.Log,
diff --git a/comp/logs/agent/agentimpl/serverless.go b/comp/logs/agent/agentimpl/serverless.go
index 3e280f8e2921c..44752f65726cc 100644
--- a/comp/logs/agent/agentimpl/serverless.go
+++ b/comp/logs/agent/agentimpl/serverless.go
@@ -11,7 +11,7 @@ import (
"go.uber.org/atomic"
logComponent "github.com/DataDog/datadog-agent/comp/core/log/impl"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/logs/agent"
flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
diff --git a/comp/logs/agent/config/constants.go b/comp/logs/agent/config/constants.go
index 2fd39527b3b90..ae9a0d74680f0 100644
--- a/comp/logs/agent/config/constants.go
+++ b/comp/logs/agent/config/constants.go
@@ -7,9 +7,7 @@ package config
// Pipeline constraints
const (
- ChanSize = 100
- DestinationPayloadChanSize = 10
- NumberOfPipelines = 4
+ NumberOfPipelines = 4
)
const (
diff --git a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go
index 253e84d8e8eb7..91033327fbd5a 100644
--- a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go
+++ b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go
@@ -19,8 +19,8 @@ import (
"github.com/DataDog/datadog-agent/comp/core/config"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
logmock "github.com/DataDog/datadog-agent/comp/core/log/mock"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
logsBundle "github.com/DataDog/datadog-agent/comp/logs"
@@ -152,7 +152,7 @@ func TestGetPayload(t *testing.T) {
// Register an error
src.Status.Error(fmt.Errorf("No such file or directory"))
logSources.AddSource(src)
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
mockLogAgent := fxutil.Test[optional.Option[logagent.Mock]](
t,
diff --git a/comp/networkpath/npcollector/npcollectorimpl/npcollector.go b/comp/networkpath/npcollector/npcollectorimpl/npcollector.go
index 4e0382ca99b3f..4ee787e567320 100644
--- a/comp/networkpath/npcollector/npcollectorimpl/npcollector.go
+++ b/comp/networkpath/npcollector/npcollectorimpl/npcollector.go
@@ -28,6 +28,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/networkpath/payload"
"github.com/DataDog/datadog-agent/pkg/networkpath/telemetry"
"github.com/DataDog/datadog-agent/pkg/networkpath/traceroute"
+ "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config"
"github.com/DataDog/datadog-agent/pkg/process/statsd"
)
@@ -72,7 +73,7 @@ type npCollectorImpl struct {
TimeNowFn func() time.Time
// TODO: instead of mocking traceroute via function replacement like this
// we should ideally create a fake/mock traceroute instance that can be passed/injected in NpCollector
- runTraceroute func(cfg traceroute.Config, telemetrycomp telemetryComp.Component) (payload.NetworkPath, error)
+ runTraceroute func(cfg config.Config, telemetrycomp telemetryComp.Component) (payload.NetworkPath, error)
networkDevicesNamespace string
}
@@ -227,7 +228,7 @@ func (s *npCollectorImpl) runTracerouteForPath(ptest *pathteststore.PathtestCont
s.logger.Debugf("Run Traceroute for ptest: %+v", ptest)
startTime := s.TimeNowFn()
- cfg := traceroute.Config{
+ cfg := config.Config{
DestHostname: ptest.Pathtest.Hostname,
DestPort: ptest.Pathtest.Port,
MaxTTL: uint8(s.collectorConfigs.maxTTL),
@@ -262,7 +263,7 @@ func (s *npCollectorImpl) runTracerouteForPath(ptest *pathteststore.PathtestCont
}
}
-func runTraceroute(cfg traceroute.Config, telemetry telemetryComp.Component) (payload.NetworkPath, error) {
+func runTraceroute(cfg config.Config, telemetry telemetryComp.Component) (payload.NetworkPath, error) {
tr, err := traceroute.New(cfg, telemetry)
if err != nil {
return payload.NetworkPath{}, fmt.Errorf("new traceroute error: %s", err)
diff --git a/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go b/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go
index c44b54e1b1db9..390ce1c8684f4 100644
--- a/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go
+++ b/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go
@@ -32,7 +32,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/logs/message"
"github.com/DataDog/datadog-agent/pkg/networkpath/metricsender"
"github.com/DataDog/datadog-agent/pkg/networkpath/payload"
- "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute"
+ "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config"
"github.com/DataDog/datadog-agent/pkg/trace/teststatsd"
utillog "github.com/DataDog/datadog-agent/pkg/util/log"
)
@@ -101,7 +101,7 @@ func Test_NpCollector_runningAndProcessing(t *testing.T) {
assert.True(t, npCollector.running)
- npCollector.runTraceroute = func(cfg traceroute.Config, _ telemetry.Component) (payload.NetworkPath, error) {
+ npCollector.runTraceroute = func(cfg config.Config, _ telemetry.Component) (payload.NetworkPath, error) {
var p payload.NetworkPath
if cfg.DestHostname == "10.0.0.2" {
p = payload.NetworkPath{
diff --git a/comp/otelcol/collector-contrib/def/go.mod b/comp/otelcol/collector-contrib/def/go.mod
index b39dd3fceaaaf..52b963fe0c2e1 100644
--- a/comp/otelcol/collector-contrib/def/go.mod
+++ b/comp/otelcol/collector-contrib/def/go.mod
@@ -2,25 +2,36 @@ module github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def
go 1.22.0
-require go.opentelemetry.io/collector/otelcol v0.111.0
+require go.opentelemetry.io/collector/otelcol v0.113.0
+
+require (
+ go.opentelemetry.io/collector/connector/connectortest v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumererror v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exportertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/internal/fanoutconsumer v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor/processortest v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receivertest v0.113.0 // indirect
+ go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 // indirect
+)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/ebitengine/purego v0.8.0 // indirect
+ github.com/ebitengine/purego v0.8.1 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
- github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.10 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
github.com/knadh/koanf/maps v0.1.1 // indirect
github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
github.com/knadh/koanf/v2 v2.1.1 // indirect
@@ -32,64 +43,60 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
- github.com/prometheus/client_golang v1.20.4 // indirect
+ github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.60.0 // indirect
+ github.com/prometheus/common v0.60.1 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
- github.com/shirou/gopsutil/v4 v4.24.9 // indirect
+ github.com/shirou/gopsutil/v4 v4.24.10 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/testify v1.9.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- go.opentelemetry.io/collector v0.111.0 // indirect
- go.opentelemetry.io/collector/component v0.111.0 // indirect
- go.opentelemetry.io/collector/component/componentprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/component/componentstatus v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
- go.opentelemetry.io/collector/confmap v1.17.0 // indirect
- go.opentelemetry.io/collector/connector v0.111.0 // indirect
- go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect
- go.opentelemetry.io/collector/exporter v0.111.0 // indirect
- go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/extension v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 // indirect
- go.opentelemetry.io/collector/featuregate v1.17.0 // indirect
- go.opentelemetry.io/collector/internal/globalgates v0.111.0 // indirect
- go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata v1.17.0 // indirect
- go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.111.0 // indirect
- go.opentelemetry.io/collector/processor v0.111.0 // indirect
- go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/receiver v0.111.0 // indirect
- go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/semconv v0.111.0 // indirect
- go.opentelemetry.io/collector/service v0.111.0 // indirect
+ go.opentelemetry.io/collector/component v0.113.0 // indirect
+ go.opentelemetry.io/collector/component/componentstatus v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/confmap v1.19.0 // indirect
+ go.opentelemetry.io/collector/connector v0.113.0 // indirect
+ go.opentelemetry.io/collector/connector/connectorprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/extensioncapabilities v0.113.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.19.0 // indirect
+ go.opentelemetry.io/collector/pdata v1.19.0 // indirect
+ go.opentelemetry.io/collector/pdata/pprofile v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/testdata v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.113.0 // indirect
+ go.opentelemetry.io/collector/service v0.113.0 // indirect
go.opentelemetry.io/contrib/config v0.10.0 // indirect
- go.opentelemetry.io/contrib/propagators/b3 v1.30.0 // indirect
- go.opentelemetry.io/otel v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/prometheus v0.52.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 // indirect
- go.opentelemetry.io/otel/log v0.6.0 // indirect
- go.opentelemetry.io/otel/metric v1.30.0 // indirect
- go.opentelemetry.io/otel/sdk v1.30.0 // indirect
- go.opentelemetry.io/otel/sdk/log v0.6.0 // indirect
- go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect
- go.opentelemetry.io/otel/trace v1.30.0 // indirect
+ go.opentelemetry.io/contrib/propagators/b3 v1.31.0 // indirect
+ go.opentelemetry.io/otel v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/prometheus v0.53.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect
+ go.opentelemetry.io/otel/log v0.7.0 // indirect
+ go.opentelemetry.io/otel/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk/log v0.7.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/trace v1.31.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
@@ -98,9 +105,9 @@ require (
golang.org/x/sys v0.26.0 // indirect
golang.org/x/text v0.19.0 // indirect
gonum.org/v1/gonum v0.15.1 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect
google.golang.org/grpc v1.67.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/comp/otelcol/collector-contrib/def/go.sum b/comp/otelcol/collector-contrib/def/go.sum
index 1faf2bd1b820c..094366b1b9664 100644
--- a/comp/otelcol/collector-contrib/def/go.sum
+++ b/comp/otelcol/collector-contrib/def/go.sum
@@ -8,12 +8,12 @@ github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/ebitengine/purego v0.8.0 h1:JbqvnEzRvPpxhCJzJJ2y0RbiZ8nyjccVUrSM3q+GvvE=
-github.com/ebitengine/purego v0.8.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
+github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE=
+github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -21,8 +21,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
-github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
-github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
@@ -43,8 +43,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
-github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs=
github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
@@ -70,25 +70,27 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
-github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
-github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
-github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/shirou/gopsutil/v4 v4.24.9 h1:KIV+/HaHD5ka5f570RZq+2SaeFsb/pq+fp2DGNWYoOI=
-github.com/shirou/gopsutil/v4 v4.24.9/go.mod h1:3fkaHNeYsUFCGZ8+9vZVWtbyM1k2eRnlL+bWO8Bxa/Q=
+github.com/shirou/gopsutil/v4 v4.24.10 h1:7VOzPtfw/5YDU+jLEoBwXwxJbQetULywoSV4RYY7HkM=
+github.com/shirou/gopsutil/v4 v4.24.10/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -105,128 +107,138 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-go.opentelemetry.io/collector v0.111.0 h1:D3LJTYrrK2ac94E2PXPSbVkArqxbklbCLsE4MAJQdRo=
-go.opentelemetry.io/collector v0.111.0/go.mod h1:eZi4Z1DmHy+sVqbUI8dZNvhrH7HZIlX+0AKorOtv6nE=
-go.opentelemetry.io/collector/client v1.17.0 h1:eJB4r4nPY0WrQ6IQEEbOPCOfQU7N15yzZud9y5fKfms=
-go.opentelemetry.io/collector/client v1.17.0/go.mod h1:egG3tOG68zvC04hgl6cW2H/oWCUCCdDWtL4WpbcSUys=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/component/componentprofiles v0.111.0 h1:yT3Sa833G9GMiXkAOuYi30afd/5vTmDQpZo6+X/XjXM=
-go.opentelemetry.io/collector/component/componentprofiles v0.111.0/go.mod h1:v9cm6ndumcbCSqZDBs0vRReRW7KSYax1RZVhs/CiZCo=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY=
-go.opentelemetry.io/collector/config/configauth v0.111.0 h1:0CcgX4TzK5iu2YtryIu3al8lNI+9fqjbGoyvAFk9ZCw=
-go.opentelemetry.io/collector/config/configauth v0.111.0/go.mod h1:5oyYNL3gnYMYNdNsEjFvA2Tdc1yjG8L+HQFIjPo6kK8=
-go.opentelemetry.io/collector/config/configcompression v1.17.0 h1:5CzLHTPOgHaKod1ZQLYs0o7GZDBhdsLQRm8Lcbo79vU=
-go.opentelemetry.io/collector/config/configcompression v1.17.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
-go.opentelemetry.io/collector/config/confighttp v0.111.0 h1:nZJFHKYYeCasyhhFC71iZf6GAs6pfFcNOga6b8+lFvc=
-go.opentelemetry.io/collector/config/confighttp v0.111.0/go.mod h1:heE5JjcLDiH8fMULf55QL2oI9+8Ct58Vq/QfP7TV684=
-go.opentelemetry.io/collector/config/configopaque v1.17.0 h1:wHhUgJhmDgNd6M7GW8IU5HjWi/pNmBEe9jBhavoR45g=
-go.opentelemetry.io/collector/config/configopaque v1.17.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
-go.opentelemetry.io/collector/config/configretry v1.17.0 h1:9GaiNKgUDx5by+A0aHKojw1BilHSK+8wq2LOmnynN00=
-go.opentelemetry.io/collector/config/configretry v1.17.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
-go.opentelemetry.io/collector/config/configtls v1.17.0 h1:5DPgmBgpKEopLGmkjaihZHVA/8yH0LGoOrUZlb86T0Q=
-go.opentelemetry.io/collector/config/configtls v1.17.0/go.mod h1:xUV5/xAHJbwrCuT2rGurBGSUqyFFAVVBcQ5DJAENeCc=
-go.opentelemetry.io/collector/config/internal v0.111.0 h1:HTrN9xCpX42xlyDskWbhA/2NkSjMasxNEuGkmjjq7Q8=
-go.opentelemetry.io/collector/config/internal v0.111.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
-go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM=
-go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k=
-go.opentelemetry.io/collector/connector v0.111.0 h1:dOaJRO27LyX4ZnkZA51namo2V5idRWvWoMVf4b7obro=
-go.opentelemetry.io/collector/connector v0.111.0/go.mod h1:gPwxA1SK+uraSTpX20MG/cNc+axhkBm8+B6z6hh6hYg=
-go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 h1:tJ4+hcWRhknw+cRw6d6dI4CyX3/puqnd1Rg9+mWdwHU=
-go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0/go.mod h1:LdfE8hNYcEb+fI5kZp4w3ZGlTLFAmvHAPtTZxS6TZ38=
-go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
-go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
-go.opentelemetry.io/collector/exporter v0.111.0 h1:NpiP6xXGOmSi59RlB5gGTB+PtCLldVeK3vCQBJPW0sU=
-go.opentelemetry.io/collector/exporter v0.111.0/go.mod h1:FjO80zGWZjqXil8vM1MS8gyxxzZ29WmChTNV2y9xjHo=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 h1:fpIRPzqsaEtbVip/wsU6h/GMGISo7UjiiYV61MOMEpQ=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0/go.mod h1:NGUTQd1fminFnw289fVQFN4dxdyedK4GTTrJUc9gCtw=
-go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8=
-go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI=
-go.opentelemetry.io/collector/extension/auth v0.111.0 h1:V9DfnMsKdVfsQMeGR5H/nAYHlZnr1Td75kkJOKbCevk=
-go.opentelemetry.io/collector/extension/auth v0.111.0/go.mod h1:4O5JQqEdAWuq4giicIy6DKlgkKTC0qgVEJm44RhviZY=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 h1:kUJSFjm6IQ6nmcJlfSFPvcEO/XeOP9gJY0Qz9O98DKg=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0/go.mod h1:qQGvl8Kz2W8b7QywtE8GNqWJMDBo47cjoiIXYuE+/zM=
-go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 h1:Ps2/2TUbAkxgZu1YxSxDweZDLJx5x7CyNKCINZkLFtY=
-go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0/go.mod h1:q4kBSWsOX62hAp7si+Y0Y0ZXWyCpXjiRuWWz7IL/MDI=
-go.opentelemetry.io/collector/extension/zpagesextension v0.111.0 h1:X+YXkJ3kX8c3xN/Mfiqc/gKB7NaQnG4Cge9R60lKOyw=
-go.opentelemetry.io/collector/extension/zpagesextension v0.111.0/go.mod h1:v5u5Ots6HgbhKsvRXB+SF9cmVTgkUATNiejHbpsa0rY=
-go.opentelemetry.io/collector/featuregate v1.17.0 h1:vpfXyWe7DFqCsDArsR9rAKKtVpt72PKjzjeqPegViws=
-go.opentelemetry.io/collector/featuregate v1.17.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
-go.opentelemetry.io/collector/internal/globalgates v0.111.0 h1:pPf/U401i/bEJ8ucbYMyqOdkujyZ92Gbm6RFkJrDvBc=
-go.opentelemetry.io/collector/internal/globalgates v0.111.0/go.mod h1:HqIBKc8J5Vccn93gkN1uaVK42VbVsuVyjmo5b1MORZo=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8=
-go.opentelemetry.io/collector/otelcol v0.111.0 h1:RcS1/BDsEBGdI4YjosdElxYwsA2tTtiYEuWjEF0p8vk=
-go.opentelemetry.io/collector/otelcol v0.111.0/go.mod h1:B/ri/CwsW7zeLXkCcB3XtarxjJ80eIC+z8guGhFFpis=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
-go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
-go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
-go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g=
-go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ=
-go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6tq/2qvbMIKlhG3s=
-go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM=
-go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
-go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
-go.opentelemetry.io/collector/service v0.111.0 h1:6yGjjbZvlYbir+vzi/9ACF965m8i96ScPTjpVvki3ms=
-go.opentelemetry.io/collector/service v0.111.0/go.mod h1:tti8TAosPuRj51/bbrSvf6OIJoSyTkywEvTdY/fAuwY=
+go.opentelemetry.io/collector v0.113.0 h1:dBuo2/OKBhoMCR86W4fFJLXGQ0gJfKRmi65AZwFkU2I=
+go.opentelemetry.io/collector v0.113.0/go.mod h1:XbjD4Yw9LunLo3IJu3ZZytNZ0drEVznxw1Z14Ujlw3s=
+go.opentelemetry.io/collector/client v1.19.0 h1:TUal8WV1agTrZStgE7BJ8ZC0IHLGtrfgO9ogU9t1mv8=
+go.opentelemetry.io/collector/client v1.19.0/go.mod h1:jgiXMEM6l8L2QEyf2I/M47Zd8+G7e4z+6H8q5SkHOlQ=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0 h1:iNlUi+vDo7OqQwg4UOjfqqY3Xram4SkdcSZAyxt6QfE=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0/go.mod h1:NF/hVnP0kcsEqV9x14yMxN9GoehuOaVgMpO+s2KE+28=
+go.opentelemetry.io/collector/config/configauth v0.113.0 h1:CBz43fGpN41MwLdwe3mw/XVSIDvGRMT8aaaPuqKukTU=
+go.opentelemetry.io/collector/config/configauth v0.113.0/go.mod h1:Q8SlxrIvL3FJO51hXa4n9ARvox04lK8mmpjf4b3UNAU=
+go.opentelemetry.io/collector/config/configcompression v1.19.0 h1:bTSjTLhnPXX1NSFM6GzguEM/NBe8QUPsXHc9kMOAJzE=
+go.opentelemetry.io/collector/config/configcompression v1.19.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
+go.opentelemetry.io/collector/config/confighttp v0.113.0 h1:a6iO0y1ZM5CPDvwbryzU+GpqAtAQ3eSfNseoAUogw7c=
+go.opentelemetry.io/collector/config/confighttp v0.113.0/go.mod h1:JZ9EwoiWMIrXt5v+d/q54TeUhPdAoLDimSEqTtddW6E=
+go.opentelemetry.io/collector/config/configopaque v1.19.0 h1:7uvntQeAAtqCaeiS2dDGrT1wLPhWvDlEsD3SliA/koQ=
+go.opentelemetry.io/collector/config/configopaque v1.19.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
+go.opentelemetry.io/collector/config/configretry v1.19.0 h1:DEg8PXpo4ahMYgMzZZUU2cPcDF4vqowZlvimJ/t9InY=
+go.opentelemetry.io/collector/config/configretry v1.19.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/config/configtls v1.19.0 h1:GQ/cF1hgNqHVBq2oSSrOFXxVCyMDyd5kq4R/RMEbL98=
+go.opentelemetry.io/collector/config/configtls v1.19.0/go.mod h1:1hyqnYB3JqEUlk1ME/s9HYz4oCRcxQCRxsJitFFT/cA=
+go.opentelemetry.io/collector/config/internal v0.113.0 h1:9RAzH8v7ItFT1npHpvP0SvUzBHcZDliCGRo9Spp6v7c=
+go.opentelemetry.io/collector/config/internal v0.113.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
+go.opentelemetry.io/collector/confmap v1.19.0 h1:TQ0lZpAKqgsE0EKk+u4JA+uBbPYeFRmWP3GH43w40CY=
+go.opentelemetry.io/collector/confmap v1.19.0/go.mod h1:GgNu1ElPGmLn9govqIfjaopvdspw4PJ9KeDtWC4E2Q4=
+go.opentelemetry.io/collector/connector v0.113.0 h1:ii+s1CjsLxtglqRlFs6tv8UU/uX45dyN9lbTRbR0p8g=
+go.opentelemetry.io/collector/connector v0.113.0/go.mod h1:KmA8eQouTVxVk65Bf6utuMjWovMuOvNVRcYV60CAGtc=
+go.opentelemetry.io/collector/connector/connectorprofiles v0.113.0 h1:yAEKTxVGpBtHrrXeZFrBcVOQkduxCncH0o4hqKrDRyw=
+go.opentelemetry.io/collector/connector/connectorprofiles v0.113.0/go.mod h1:+mwzwIZ1cEK29ByfC38uF8hmFO8Wf9ShT1c756XX+RI=
+go.opentelemetry.io/collector/connector/connectortest v0.113.0 h1:WHekoL0izkrKLVQLv79v0QhqfnXkVcw0sgdF07EqWLM=
+go.opentelemetry.io/collector/connector/connectortest v0.113.0/go.mod h1:KouywNfkxRf+yzbI2pdolzTLkLoCV4ASEI2o2pDt+Cg=
+go.opentelemetry.io/collector/consumer v0.113.0 h1:KJSiK5vSIY9dgPxwKfQ3gOgKtQsqc+7IB7mGhUAL5c8=
+go.opentelemetry.io/collector/consumer v0.113.0/go.mod h1:zHMlXYFaJlZoLCBR6UwWoyXZ/adcO1u2ydqUal3VmYU=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0 h1:Hd2N7n9RKbnKRaVrdw6fPBoQko5zZIgCxwVxkL6SAIE=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0/go.mod h1:o0MAGFdzcr7LFTUQ6iivPPhbVmn2ZVIYm3FPXk2+JUo=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 h1:RftAcQUY5UOfbEK4s16jnORqTx16y9+PxA1lQwt98cQ=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0/go.mod h1:ZuHrQ4pWguh6dw0DgTfcUtdY/T+cnOJJNP6LMbm5Y5A=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0 h1:ua2AjNx3DUA8qElXNkggB4w3VDL/rBKBvryOQkhumH8=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0/go.mod h1:vK8o4ZTZSiG3rVyqxZcCNmT/cvEfx34ig7V65L9+6Rg=
+go.opentelemetry.io/collector/exporter v0.113.0 h1:lDZJ6xfuhyLsT/7lqLhIN/ftA6G+9fuYFtubPFvNDxo=
+go.opentelemetry.io/collector/exporter v0.113.0/go.mod h1:0W4NBf5NjWYxR8oJodmOybgN4O0MLazdJwwHevirvXg=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 h1:8bsk3wYYNr+WAM5nZkFjiLYSTH9MsY2tm7nUpMWt3qc=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0/go.mod h1:/HFWF846XePYL/qKDtcEAFgkiGSkLUTaC59A5F48axM=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0 h1:U6cRxjJS7td8iNriUI2QfEdH+Yj60ytyvpmnmKTw0+8=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0/go.mod h1:SRz5jGyAjtNiWwJ93B1+Ndk1p3oFtQsyLw52UGeyRwc=
+go.opentelemetry.io/collector/extension v0.113.0 h1:Vp/YSL8ZCkJQrP1lf2Bm5yaTvcp6ROO3AnfuSL3GEXM=
+go.opentelemetry.io/collector/extension v0.113.0/go.mod h1:Pwp0TNqdHeER4V1I6H6oCvrto/riiOAqs3737BWCnjw=
+go.opentelemetry.io/collector/extension/auth v0.113.0 h1:4ggRy1vepOabUiCWfU+6M9P/ftXojMUNAvBpeLihYj8=
+go.opentelemetry.io/collector/extension/auth v0.113.0/go.mod h1:VbvAm2YZAqePkWgwn0m0vBaq3aC49CxPVwHmrJ24aeQ=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 h1:Qq4IaB6bMUrf/bWoPZ5ESWywCt+vDi8I/ChYejIEPcc=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0/go.mod h1:BRmo+A7f06u/rhyLauU/Vogk+QRN0y1j2VVVgMGWrfQ=
+go.opentelemetry.io/collector/extension/extensioncapabilities v0.113.0 h1:GuJzpnrJtsMrKWGmb1VL4EqL6x1HDtZmtvy3yEjth6Y=
+go.opentelemetry.io/collector/extension/extensioncapabilities v0.113.0/go.mod h1:oa72qndu7nAfEeEpDyDi9qLcaSJGIscLc/eeojFADx0=
+go.opentelemetry.io/collector/extension/zpagesextension v0.113.0 h1:b/Clxso9uVwLVYjvRQ1NGBWHpUEZ/++uA5sJbBj0ryo=
+go.opentelemetry.io/collector/extension/zpagesextension v0.113.0/go.mod h1:5csGYy9Ydfy6Hpw3Tod864P6HUEZpA6UiuPJPG3TjSU=
+go.opentelemetry.io/collector/featuregate v1.19.0 h1:ASea2sU+tdpKI3RxIJC/pufDAfwAmrvcQ4EmTHVu0B0=
+go.opentelemetry.io/collector/featuregate v1.19.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
+go.opentelemetry.io/collector/internal/fanoutconsumer v0.113.0 h1:Beu2zAN6/EDXQ6hMFU6FT1BsnU5FXmWNOlfTAhrgbGc=
+go.opentelemetry.io/collector/internal/fanoutconsumer v0.113.0/go.mod h1:WUXbc4L6KJ3SpmsxBgId0OYzRDuS7n274kNpqrgnSmY=
+go.opentelemetry.io/collector/otelcol v0.113.0 h1:t32gA8Pg9lsqYQml4mgvHdqFSsjvPZMvGywi0zXQYjk=
+go.opentelemetry.io/collector/otelcol v0.113.0/go.mod h1:PV6pDLPEaulRs3ceWYNEDuG5100F35I5VzeC2ekT/vY=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0 h1:VRf4p0VhfuaR+Epy/nMIlu/9t39WU9CUgHVUvpuGxfU=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0/go.mod h1:5aDejksdXh5PdJN/OhpzATGT3kbNL0RMmw2Q0Q6E/o0=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0 h1:vRfn85jicO2F4eOTgsWtzmU/K3E/uZUtM1HEefvvJD8=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0/go.mod h1:sR+6eR+YEJhYZu9StbqzeWcCmHpfBAgX/qjP82HY9Gw=
+go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uFxcm2VO+vhad/eE=
+go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg=
+go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0 h1:PwQnErsLvEd1x6VIyjLmKQot9huKWqIfEz1kd+8aj4k=
+go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0/go.mod h1:tChJYsCG3wc6JPT9aJO3y+32V14NhmCFZOh3k5ORGdQ=
+go.opentelemetry.io/collector/processor v0.113.0 h1:BQI6MsKtiCG9HT/nmiRXTKP6SZFrjFKVfM6pTQfbc0k=
+go.opentelemetry.io/collector/processor v0.113.0/go.mod h1:oX91zMI8ZkoaYSUfUYflHiMiiBJPKtODNBUCrETLLd8=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE=
+go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA=
+go.opentelemetry.io/collector/processor/processortest v0.113.0/go.mod h1:tNg04r5KlpVx774jSC8U/oYst1eb7WIiz+0AjXKJ0Uw=
+go.opentelemetry.io/collector/receiver v0.113.0 h1:vraAbkPy8Pz9x5X39gV+j9t6x23PNsY2aJ6gQMugRbQ=
+go.opentelemetry.io/collector/receiver v0.113.0/go.mod h1:IUa8/lNw8Qh4L5Q3jOeRWKW0ebQPoNcfhytxN5Puq2A=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 h1:uVxuzjGe2t1sbwahSBowVHYnGzpzn8brmfn8z1UHvQg=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0/go.mod h1:khKDkzYJR2x2OPUqGSmoSncdINT9lUE5IThiHPDbqZk=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0 h1:0vOvz3S4Q/KwcNCS9C7zPo0uxD6RSWktG88yGdxfV6g=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0/go.mod h1:sRq5ctm5UE/0Ar562wnCVQ1zbAie/D127D1WbtbEuEc=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/collector/service v0.113.0 h1:SFT+kWuj9TehC34MLEWfXH6QocGl3MYqLJ7UbxZtWzM=
+go.opentelemetry.io/collector/service v0.113.0/go.mod h1:6+JY80Yd4J4RWpvRmpCUUZFOZKGVs9a1QKCKPlDrKfs=
+go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 h1:j8icMXyyqNf6HGuwlYhniPnVsbJIq7n+WirDu3VAJdQ=
+go.opentelemetry.io/contrib/bridges/otelzap v0.6.0/go.mod h1:evIOZpl+kAlU5IsaYX2Siw+IbpacAZvXemVsgt70uvw=
go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c=
go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
-go.opentelemetry.io/contrib/propagators/b3 v1.30.0 h1:vumy4r1KMyaoQRltX7cJ37p3nluzALX9nugCjNNefuY=
-go.opentelemetry.io/contrib/propagators/b3 v1.30.0/go.mod h1:fRbvRsaeVZ82LIl3u0rIvusIel2UUf+JcaaIpy5taho=
-go.opentelemetry.io/contrib/zpages v0.55.0 h1:F+xj261Ulwl79QC+2O+IO1b3NbwppUDwN+7LbDSdQcY=
-go.opentelemetry.io/contrib/zpages v0.55.0/go.mod h1:dDqDGDfbXSjt/k9orZk4Huulvz1letX1YWTKts5GQpo=
-go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
-go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 h1:QSKmLBzbFULSyHzOdO9JsN9lpE4zkrz1byYGmJecdVE=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0/go.mod h1:sTQ/NH8Yrirf0sJ5rWqVu+oT82i4zL9FaF6rWcqnptM=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 h1:WypxHH02KX2poqqbaadmkMYalGyy/vil4HE4PM4nRJc=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0/go.mod h1:U79SV99vtvGSEBeeHnpgGJfTsnsdkWLpPN/CcHAzBSI=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 h1:VrMAbeJz4gnVDg2zEzjHG4dEH86j4jO6VYB+NgtGD8s=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0/go.mod h1:qqN/uFdpeitTvm+JDqqnjm517pmQRYxTORbETHq5tOc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8=
-go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ=
-go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 h1:bZHOb8k/CwwSt0DgvgaoOhBXWNdWqFWaIsGTtg1H3KE=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0/go.mod h1:XlV163j81kDdIt5b5BXCjdqVfqJFy/LJrHA697SorvQ=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 h1:IyFlqNsi8VT/nwYlLJfdM0y1gavxGpEvnf6FtVfZ6X4=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0/go.mod h1:bxiX8eUeKoAEQmbq/ecUT8UqZwCjZW52yJrXJUSozsk=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 h1:kn1BudCgwtE7PxLqcZkErpD8GKqLZ6BSzeW9QihQJeM=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0/go.mod h1:ljkUDtAMdleoi9tIG1R6dJUpVwDcYjw3J2Q6Q/SuiC0=
-go.opentelemetry.io/otel/log v0.6.0 h1:nH66tr+dmEgW5y+F9LanGJUBYPrRgP4g2EkmPE3LeK8=
-go.opentelemetry.io/otel/log v0.6.0/go.mod h1:KdySypjQHhP069JX0z/t26VHwa8vSwzgaKmXtIB3fJM=
-go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
-go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
-go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE=
-go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg=
-go.opentelemetry.io/otel/sdk/log v0.6.0 h1:4J8BwXY4EeDE9Mowg+CyhWVBhTSLXVXodiXxS/+PGqI=
-go.opentelemetry.io/otel/sdk/log v0.6.0/go.mod h1:L1DN8RMAduKkrwRAFDEX3E3TLOq46+XMGSbUfHU/+vE=
-go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM=
-go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y=
-go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
-go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
+go.opentelemetry.io/contrib/propagators/b3 v1.31.0 h1:PQPXYscmwbCp76QDvO4hMngF2j8Bx/OTV86laEl8uqo=
+go.opentelemetry.io/contrib/propagators/b3 v1.31.0/go.mod h1:jbqfV8wDdqSDrAYxVpXQnpM0XFMq2FtDesblJ7blOwQ=
+go.opentelemetry.io/contrib/zpages v0.56.0 h1:W7vP6s3juzL5KiHpr41zLNmsJ0QAZudYu8ay0zGAoko=
+go.opentelemetry.io/contrib/zpages v0.56.0/go.mod h1:IxPRP4TYHw9jLeaEOSDIiA9zmyJNZNO6sbW55iMvSXs=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 h1:mMOmtYie9Fx6TSVzw4W+NTpvoaS1JWWga37oI1a/4qQ=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0/go.mod h1:yy7nDsMMBUkD+jeekJ36ur5f3jJIrmCwUrY67VFhNpA=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 h1:FZ6ei8GFW7kyPYdxJaV2rgI6M+4tvZzhYsQ2wgyVC08=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0/go.mod h1:MdEu/mC6j3D+tTEfvI15b5Ci2Fn7NneJ71YMoiS3tpI=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 h1:ZsXq73BERAiNuuFXYqP4MR5hBrjXfMGSO+Cx7qoOZiM=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0/go.mod h1:hg1zaDMpyZJuUzjFxFsRYBoccE86tM9Uf4IqNMUxvrY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
+go.opentelemetry.io/otel/exporters/prometheus v0.53.0 h1:QXobPHrwiGLM4ufrY3EOmDPJpo2P90UuFau4CDPJA/I=
+go.opentelemetry.io/otel/exporters/prometheus v0.53.0/go.mod h1:WOAXGr3D00CfzmFxtTV1eR0GpoHuPEu+HJT8UWW2SIU=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 h1:TwmL3O3fRR80m8EshBrd8YydEZMcUCsZXzOUlnFohwM=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0/go.mod h1:tH98dDv5KPmPThswbXA0fr0Lwfs+OhK8HgaCo7PjRrk=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0 h1:HZgBIps9wH0RDrwjrmNa3DVbNRW60HEhdzqZFyAp3fI=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0/go.mod h1:RDRhvt6TDG0eIXmonAx5bd9IcwpqCkziwkOClzWKwAQ=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64=
+go.opentelemetry.io/otel/log v0.7.0 h1:d1abJc0b1QQZADKvfe9JqqrfmPYQCz2tUSO+0XZmuV4=
+go.opentelemetry.io/otel/log v0.7.0/go.mod h1:2jf2z7uVfnzDNknKTO9G+ahcOAyWcp1fJmk/wJjULRo=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/sdk/log v0.7.0 h1:dXkeI2S0MLc5g0/AwxTZv6EUEjctiH8aG14Am56NTmQ=
+go.opentelemetry.io/otel/sdk/log v0.7.0/go.mod h1:oIRXpW+WD6M8BuGj5rtS0aRu/86cbDV/dAfNaZBIjYM=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -274,14 +286,14 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0=
gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o=
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg=
+google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/comp/otelcol/collector-contrib/impl/components.go b/comp/otelcol/collector-contrib/impl/components.go
index 8447db0c86273..016237e62f9ed 100644
--- a/comp/otelcol/collector-contrib/impl/components.go
+++ b/comp/otelcol/collector-contrib/impl/components.go
@@ -71,14 +71,14 @@ func components() (otelcol.Factories, error) {
return otelcol.Factories{}, err
}
factories.ExtensionModules = make(map[component.Type]string, len(factories.Extensions))
- factories.ExtensionModules[zpagesextension.NewFactory().Type()] = "go.opentelemetry.io/collector/extension/zpagesextension v0.111.0"
- factories.ExtensionModules[healthcheckextension.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0"
- factories.ExtensionModules[pprofextension.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0"
- factories.ExtensionModules[dockerobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.111.0"
- factories.ExtensionModules[ecsobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.111.0"
- factories.ExtensionModules[ecstaskobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.111.0"
- factories.ExtensionModules[hostobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.111.0"
- factories.ExtensionModules[k8sobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.111.0"
+ factories.ExtensionModules[zpagesextension.NewFactory().Type()] = "go.opentelemetry.io/collector/extension/zpagesextension v0.113.0"
+ factories.ExtensionModules[healthcheckextension.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0"
+ factories.ExtensionModules[pprofextension.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0"
+ factories.ExtensionModules[dockerobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.113.0"
+ factories.ExtensionModules[ecsobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.113.0"
+ factories.ExtensionModules[ecstaskobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.113.0"
+ factories.ExtensionModules[hostobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.113.0"
+ factories.ExtensionModules[k8sobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.113.0"
factories.Receivers, err = receiver.MakeFactoryMap(
nopreceiver.NewFactory(),
@@ -95,15 +95,15 @@ func components() (otelcol.Factories, error) {
return otelcol.Factories{}, err
}
factories.ReceiverModules = make(map[component.Type]string, len(factories.Receivers))
- factories.ReceiverModules[nopreceiver.NewFactory().Type()] = "go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0"
- factories.ReceiverModules[otlpreceiver.NewFactory().Type()] = "go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0"
- factories.ReceiverModules[filelogreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.111.0"
- factories.ReceiverModules[fluentforwardreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.111.0"
- factories.ReceiverModules[hostmetricsreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.111.0"
- factories.ReceiverModules[jaegerreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.111.0"
- factories.ReceiverModules[prometheusreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0"
- factories.ReceiverModules[receivercreator.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.111.0"
- factories.ReceiverModules[zipkinreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.111.0"
+ factories.ReceiverModules[nopreceiver.NewFactory().Type()] = "go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0"
+ factories.ReceiverModules[otlpreceiver.NewFactory().Type()] = "go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0"
+ factories.ReceiverModules[filelogreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.113.0"
+ factories.ReceiverModules[fluentforwardreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.113.0"
+ factories.ReceiverModules[hostmetricsreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.113.0"
+ factories.ReceiverModules[jaegerreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.113.0"
+ factories.ReceiverModules[prometheusreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0"
+ factories.ReceiverModules[receivercreator.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.113.0"
+ factories.ReceiverModules[zipkinreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.113.0"
factories.Exporters, err = exporter.MakeFactoryMap(
debugexporter.NewFactory(),
@@ -116,11 +116,11 @@ func components() (otelcol.Factories, error) {
return otelcol.Factories{}, err
}
factories.ExporterModules = make(map[component.Type]string, len(factories.Exporters))
- factories.ExporterModules[debugexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/debugexporter v0.111.0"
- factories.ExporterModules[nopexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/nopexporter v0.111.0"
- factories.ExporterModules[otlpexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0"
- factories.ExporterModules[otlphttpexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0"
- factories.ExporterModules[sapmexporter.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.111.0"
+ factories.ExporterModules[debugexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/debugexporter v0.113.0"
+ factories.ExporterModules[nopexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/nopexporter v0.113.0"
+ factories.ExporterModules[otlpexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0"
+ factories.ExporterModules[otlphttpexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0"
+ factories.ExporterModules[sapmexporter.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.113.0"
factories.Processors, err = processor.MakeFactoryMap(
batchprocessor.NewFactory(),
@@ -141,19 +141,19 @@ func components() (otelcol.Factories, error) {
return otelcol.Factories{}, err
}
factories.ProcessorModules = make(map[component.Type]string, len(factories.Processors))
- factories.ProcessorModules[batchprocessor.NewFactory().Type()] = "go.opentelemetry.io/collector/processor/batchprocessor v0.111.0"
- factories.ProcessorModules[memorylimiterprocessor.NewFactory().Type()] = "go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.111.0"
- factories.ProcessorModules[attributesprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.111.0"
- factories.ProcessorModules[cumulativetodeltaprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.111.0"
- factories.ProcessorModules[filterprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.111.0"
- factories.ProcessorModules[groupbyattrsprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.111.0"
- factories.ProcessorModules[k8sattributesprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.111.0"
- factories.ProcessorModules[probabilisticsamplerprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.111.0"
- factories.ProcessorModules[resourcedetectionprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.111.0"
- factories.ProcessorModules[resourceprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.111.0"
- factories.ProcessorModules[routingprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.111.0"
- factories.ProcessorModules[tailsamplingprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0"
- factories.ProcessorModules[transformprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0"
+ factories.ProcessorModules[batchprocessor.NewFactory().Type()] = "go.opentelemetry.io/collector/processor/batchprocessor v0.113.0"
+ factories.ProcessorModules[memorylimiterprocessor.NewFactory().Type()] = "go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0"
+ factories.ProcessorModules[attributesprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.113.0"
+ factories.ProcessorModules[cumulativetodeltaprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.113.0"
+ factories.ProcessorModules[filterprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.113.0"
+ factories.ProcessorModules[groupbyattrsprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.113.0"
+ factories.ProcessorModules[k8sattributesprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.113.0"
+ factories.ProcessorModules[probabilisticsamplerprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.113.0"
+ factories.ProcessorModules[resourcedetectionprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.113.0"
+ factories.ProcessorModules[resourceprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.113.0"
+ factories.ProcessorModules[routingprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.113.0"
+ factories.ProcessorModules[tailsamplingprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.113.0"
+ factories.ProcessorModules[transformprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.113.0"
factories.Connectors, err = connector.MakeFactoryMap(
spanmetricsconnector.NewFactory(),
@@ -162,7 +162,7 @@ func components() (otelcol.Factories, error) {
return otelcol.Factories{}, err
}
factories.ConnectorModules = make(map[component.Type]string, len(factories.Connectors))
- factories.ConnectorModules[spanmetricsconnector.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0"
+ factories.ConnectorModules[spanmetricsconnector.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.113.0"
return factories, nil
}
diff --git a/comp/otelcol/collector-contrib/impl/go.mod b/comp/otelcol/collector-contrib/impl/go.mod
index 8e2259aa52a00..d6cb082973460 100644
--- a/comp/otelcol/collector-contrib/impl/go.mod
+++ b/comp/otelcol/collector-contrib/impl/go.mod
@@ -4,53 +4,53 @@ module github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib
go 1.22.0
-toolchain go1.22.5
+toolchain go1.22.8
require (
github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def v0.0.0-00010101000000-000000000000
- github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.111.0
- go.opentelemetry.io/collector/component v0.111.0
- go.opentelemetry.io/collector/connector v0.111.0
- go.opentelemetry.io/collector/exporter v0.111.0
- go.opentelemetry.io/collector/exporter/debugexporter v0.111.0
- go.opentelemetry.io/collector/exporter/nopexporter v0.111.0
- go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0
- go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0
- go.opentelemetry.io/collector/extension v0.111.0
- go.opentelemetry.io/collector/extension/zpagesextension v0.111.0
- go.opentelemetry.io/collector/otelcol v0.111.0
- go.opentelemetry.io/collector/processor v0.111.0
- go.opentelemetry.io/collector/processor/batchprocessor v0.111.0
- go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.111.0
- go.opentelemetry.io/collector/receiver v0.111.0
- go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0
- go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.113.0
+ go.opentelemetry.io/collector/component v0.113.0
+ go.opentelemetry.io/collector/connector v0.113.0
+ go.opentelemetry.io/collector/exporter v0.113.0
+ go.opentelemetry.io/collector/exporter/debugexporter v0.113.0
+ go.opentelemetry.io/collector/exporter/nopexporter v0.113.0
+ go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0
+ go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0
+ go.opentelemetry.io/collector/extension v0.113.0
+ go.opentelemetry.io/collector/extension/zpagesextension v0.113.0
+ go.opentelemetry.io/collector/otelcol v0.113.0
+ go.opentelemetry.io/collector/processor v0.113.0
+ go.opentelemetry.io/collector/processor/batchprocessor v0.113.0
+ go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0
+ go.opentelemetry.io/collector/receiver v0.113.0
+ go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0
+ go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0
)
require (
@@ -64,18 +64,18 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Code-Hex/go-generics-cache v1.5.1 // indirect
- github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Showmax/go-fqdn v1.0.0 // indirect
github.com/alecthomas/participle/v2 v2.1.1 // indirect
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect
- github.com/antchfx/xmlquery v1.4.1 // indirect
- github.com/antchfx/xpath v1.3.1 // indirect
+ github.com/antchfx/xmlquery v1.4.2 // indirect
+ github.com/antchfx/xpath v1.3.2 // indirect
github.com/apache/thrift v0.21.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/aws/aws-sdk-go v1.55.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect
+ github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 // indirect
@@ -83,10 +83,10 @@ require (
github.com/dennwc/varint v1.0.0 // indirect
github.com/digitalocean/godo v1.118.0 // indirect
github.com/distribution/reference v0.6.0 // indirect
- github.com/docker/docker v27.0.3+incompatible // indirect
+ github.com/docker/docker v27.3.1+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
- github.com/ebitengine/purego v0.8.0 // indirect
+ github.com/ebitengine/purego v0.8.1 // indirect
github.com/elastic/go-grok v0.3.1 // indirect
github.com/elastic/lunes v0.1.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
@@ -95,7 +95,7 @@ require (
github.com/expr-lang/expr v1.16.9 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
@@ -106,7 +106,7 @@ require (
github.com/go-openapi/jsonreference v0.20.4 // indirect
github.com/go-openapi/swag v0.22.9 // indirect
github.com/go-resty/resty/v2 v2.13.1 // indirect
- github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/go-zookeeper/zk v1.0.3 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/goccy/go-json v0.10.3 // indirect
@@ -129,7 +129,7 @@ require (
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
- github.com/hashicorp/consul/api v1.29.4 // indirect
+ github.com/hashicorp/consul/api v1.30.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -148,13 +148,13 @@ require (
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ionos-cloud/sdk-go/v6 v6.1.11 // indirect
- github.com/jaegertracing/jaeger v1.61.0 // indirect
+ github.com/jaegertracing/jaeger v1.62.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jonboulle/clockwork v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.10 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
github.com/knadh/koanf/maps v0.1.1 // indirect
github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
github.com/knadh/koanf/v2 v2.1.1 // indirect
@@ -180,26 +180,26 @@ require (
github.com/mostynb/go-grpc-compression v1.2.3 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.111.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.113.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/openshift/api v3.9.0+incompatible // indirect
@@ -207,28 +207,29 @@ require (
github.com/openzipkin/zipkin-go v0.4.3 // indirect
github.com/ovh/go-ovh v1.6.0 // indirect
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
+ github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
github.com/prometheus-community/windows_exporter v0.27.2 // indirect
- github.com/prometheus/client_golang v1.20.4 // indirect
+ github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.60.0 // indirect
+ github.com/prometheus/common v0.60.1 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/prometheus/prometheus v0.54.1 // indirect
github.com/rs/cors v1.11.1 // indirect
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 // indirect
- github.com/shirou/gopsutil/v4 v4.24.9 // indirect
- github.com/signalfx/sapm-proto v0.14.0 // indirect
+ github.com/shirou/gopsutil/v4 v4.24.10 // indirect
+ github.com/signalfx/sapm-proto v0.16.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/stretchr/testify v1.9.0 // indirect
- github.com/tinylib/msgp v1.2.2 // indirect
+ github.com/tinylib/msgp v1.2.3 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/ua-parser/uap-go v0.0.0-20240611065828-3a4781585db6 // indirect
@@ -237,63 +238,72 @@ require (
github.com/x448/float16 v0.8.4 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/collector v0.111.0 // indirect
- go.opentelemetry.io/collector/client v1.17.0 // indirect
- go.opentelemetry.io/collector/component/componentprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/component/componentstatus v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configauth v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configcompression v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configgrpc v0.111.0 // indirect
- go.opentelemetry.io/collector/config/confighttp v0.111.0 // indirect
- go.opentelemetry.io/collector/config/confignet v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configopaque v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configretry v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configtls v1.17.0 // indirect
- go.opentelemetry.io/collector/config/internal v0.111.0 // indirect
- go.opentelemetry.io/collector/confmap v1.17.0 // indirect
- go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect
- go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/auth v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 // indirect
- go.opentelemetry.io/collector/featuregate v1.17.0 // indirect
- go.opentelemetry.io/collector/filter v0.111.0 // indirect
- go.opentelemetry.io/collector/internal/globalgates v0.111.0 // indirect
- go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata v1.17.0 // indirect
- go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.111.0 // indirect
- go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/semconv v0.111.0 // indirect
- go.opentelemetry.io/collector/service v0.111.0 // indirect
+ go.opentelemetry.io/collector v0.113.0 // indirect
+ go.opentelemetry.io/collector/client v1.19.0 // indirect
+ go.opentelemetry.io/collector/component/componentstatus v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configauth v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configcompression v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configgrpc v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/confighttp v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/confignet v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configopaque v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configretry v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configtls v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/internal v0.113.0 // indirect
+ go.opentelemetry.io/collector/confmap v1.19.0 // indirect
+ go.opentelemetry.io/collector/connector/connectorprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/connector/connectortest v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumererror v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exportertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/auth v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/extensioncapabilities v0.113.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.19.0 // indirect
+ go.opentelemetry.io/collector/filter v0.113.0 // indirect
+ go.opentelemetry.io/collector/internal/fanoutconsumer v0.113.0 // indirect
+ go.opentelemetry.io/collector/internal/memorylimiter v0.113.0 // indirect
+ go.opentelemetry.io/collector/internal/sharedcomponent v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata v1.19.0 // indirect
+ go.opentelemetry.io/collector/pdata/pprofile v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/testdata v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor/processortest v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receivertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.113.0 // indirect
+ go.opentelemetry.io/collector/service v0.113.0 // indirect
+ go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 // indirect
go.opentelemetry.io/contrib/config v0.10.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect
- go.opentelemetry.io/contrib/propagators/b3 v1.30.0 // indirect
- go.opentelemetry.io/contrib/zpages v0.55.0 // indirect
- go.opentelemetry.io/otel v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/prometheus v0.52.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 // indirect
- go.opentelemetry.io/otel/log v0.6.0 // indirect
- go.opentelemetry.io/otel/metric v1.30.0 // indirect
- go.opentelemetry.io/otel/sdk v1.30.0 // indirect
- go.opentelemetry.io/otel/sdk/log v0.6.0 // indirect
- go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect
- go.opentelemetry.io/otel/trace v1.30.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect
+ go.opentelemetry.io/contrib/propagators/b3 v1.31.0 // indirect
+ go.opentelemetry.io/contrib/zpages v0.56.0 // indirect
+ go.opentelemetry.io/otel v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/prometheus v0.53.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect
+ go.opentelemetry.io/otel/log v0.7.0 // indirect
+ go.opentelemetry.io/otel/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk/log v0.7.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/trace v1.31.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
@@ -311,17 +321,17 @@ require (
golang.org/x/tools v0.26.0 // indirect
gonum.org/v1/gonum v0.15.1 // indirect
google.golang.org/api v0.188.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect
google.golang.org/grpc v1.67.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/api v0.31.1 // indirect
- k8s.io/apimachinery v0.31.1 // indirect
- k8s.io/client-go v0.31.1 // indirect
+ k8s.io/api v0.31.2 // indirect
+ k8s.io/apimachinery v0.31.2 // indirect
+ k8s.io/client-go v0.31.2 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
diff --git a/comp/otelcol/collector-contrib/impl/go.sum b/comp/otelcol/collector-contrib/impl/go.sum
index 2a1b8c1bbf803..0c6ac48606e70 100644
--- a/comp/otelcol/collector-contrib/impl/go.sum
+++ b/comp/otelcol/collector-contrib/impl/go.sum
@@ -69,14 +69,12 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/Microsoft/hcsshim v0.12.5 h1:bpTInLlDy/nDRWFVcefDZZ1+U8tS+rz3MxjKgu9boo0=
-github.com/Microsoft/hcsshim v0.12.5/go.mod h1:tIUGego4G1EN5Hb6KC90aDYiUI2dqLSTTOCjVNpOgZ8=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
@@ -95,10 +93,10 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
-github.com/antchfx/xmlquery v1.4.1 h1:YgpSwbeWvLp557YFTi8E3z6t6/hYjmFEtiEKbDfEbl0=
-github.com/antchfx/xmlquery v1.4.1/go.mod h1:lKezcT8ELGt8kW5L+ckFMTbgdR61/odpPgDv8Gvi1fI=
-github.com/antchfx/xpath v1.3.1 h1:PNbFuUqHwWl0xRjvUPjJ95Agbmdj2uzzIwmQKgu4oCk=
-github.com/antchfx/xpath v1.3.1/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
+github.com/antchfx/xmlquery v1.4.2 h1:MZKd9+wblwxfQ1zd1AdrTsqVaMjMCwow3IqkCSe00KA=
+github.com/antchfx/xmlquery v1.4.2/go.mod h1:QXhvf5ldTuGqhd1SHNvvtlhhdQLks4dD0awIVhXIDTA=
+github.com/antchfx/xpath v1.3.2 h1:LNjzlsSjinu3bQpw9hWMY9ocB80oLOWuQqFvO6xt51U=
+github.com/antchfx/xpath v1.3.2/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE=
github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -119,8 +117,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I=
-github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
+github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q=
+github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -136,14 +134,12 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg=
github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
-github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao=
-github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4=
-github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
-github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
-github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
-github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
+github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
+github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
+github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
+github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -158,15 +154,15 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
-github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
+github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/ebitengine/purego v0.8.0 h1:JbqvnEzRvPpxhCJzJJ2y0RbiZ8nyjccVUrSM3q+GvvE=
-github.com/ebitengine/purego v0.8.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
+github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE=
+github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/elastic/go-grok v0.3.1 h1:WEhUxe2KrwycMnlvMimJXvzRa7DoByJB4PVUIE1ZD/U=
github.com/elastic/go-grok v0.3.1/go.mod h1:n38ls8ZgOboZRgKcjMY8eFeZFMmcL9n2lP0iHhIDk64=
github.com/elastic/lunes v0.1.0 h1:amRtLPjwkWtzDF/RKzcEPMvSsSseLDLW+bnhfNSLRe4=
@@ -201,8 +197,8 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -255,8 +251,8 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
-github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@@ -376,10 +372,8 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwn
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
-github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
-github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
-github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
-github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
+github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ=
+github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
@@ -450,8 +444,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
-github.com/jaegertracing/jaeger v1.61.0 h1:9PCP5vkilmoxmSHa9jFvtULoduFJqJ0/bHbRxUMPWTc=
-github.com/jaegertracing/jaeger v1.61.0/go.mod h1:DMy9PNQ7tOgo811jOv7UAQaM0NeSJ95lh6SW3O1s1Xk=
+github.com/jaegertracing/jaeger v1.62.0 h1:YoaJ2e8oVz5sqGGlVAKSUCED8DzJ1q7PojBmZFNKoJA=
+github.com/jaegertracing/jaeger v1.62.0/go.mod h1:jhEIHazwyb+a6xlRBi+p96BAvTYTSmGkghcwdQfV7FM=
github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@@ -476,8 +470,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
-github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs=
github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
@@ -565,6 +559,8 @@ github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
+github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
+github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -625,114 +621,114 @@ github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557c
github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
-github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0 h1:9rSlNU6xUEcgneB7Pm502VMH63Abc8Ibpd9y0fBit3Q=
-github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0/go.mod h1:J87FjckPF9jl1MLA36Yemp6JfsCMNk0QDUBb+7rLw7E=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.111.0 h1:p8vV11sj1cJFbd3B9tuGiA9gMGTvaSR4A57qQvVs9iY=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.111.0/go.mod h1:sBcqg1DlNC6V8e455vASnIrr8LJX7stQ8V3wlbCsIhM=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.111.0 h1:65bd8qYi83LkBrhWEfw0d46p71YBZmPHoIvx/+DJ0cI=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.111.0/go.mod h1:P7L8iofBjb57TW/7NlmAn68fs6ayzhNJRzIO2d+UwhI=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0 h1:59r33b8JeJUCQElz57S7AbwqUfQ1f1FVVjgyjiZp7Ec=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0/go.mod h1:4/axxWsIgRRptIg4olabh6ZXNL0Xt0Qjpaij8mD+dt8=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.111.0 h1:TcJ6/abaapOCRP0EfMaWbLavFF05dyFe+i99k4ABumM=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.111.0/go.mod h1:betAqGGfDFb8SLyMYBwH9BQyB9wzxWOWXXC/Ht6/kas=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.111.0 h1:w+WuYgdPpak1XzQh+RUXGaTiwBzIOlL+xfg1eE0XatM=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.111.0/go.mod h1:augVLlrOmDTXR/TcpG4ZkpnTsh629dmrCkgW5zpVySE=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.111.0 h1:Lb2NxWlKjDxGpWRvuroGTxPTk3zbTM6DsRZoN/lHJYM=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.111.0/go.mod h1:asw3mhiAUu9Vv8QRVRTnTB6at2OVP7u9ucDjX7K/yBw=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.111.0 h1:SyRe1UcR+D5KQvgbcfMfBwf/6HSwggptgTaDlRXMuXc=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.111.0/go.mod h1:Z9hwMuYMYlL6GN6zEDhBxiejJZrjjdb492J3TdlrWf4=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.111.0 h1:L6xShMvEZcWtKGguMkUcem6EDaJXVT4nN8FAkUfiPsA=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.111.0/go.mod h1:myfv37ZXoLD5aO6qp2sjwKUiEopLIbwOmCBvC+fjGj4=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.111.0 h1:tDZgAMfdxJxYOnNg0U5q2F+0ATri6IVpiE7XOtoLRI8=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.111.0/go.mod h1:huTYwd0Qjl2xjPtgHVwOW27UEY19Zdh8pJDf+JvLC+I=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0 h1:m/u2iRttl/nEjp0EZ9w371LLAqogw0tDn+wPU6D7ArY=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0/go.mod h1:mmkCl306sRZYt/7uNmjvuRRvbe/xUDSDm8fEAGSSMKI=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.111.0 h1:n1p2DedLvPEN1XEx26s1PR1PCuXTgCY4Eo+kDTq7q0s=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.111.0/go.mod h1:PdhkBzDs6Qvcv3lxNQw0ztrGaa1foNBODrF2v09zlyA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.111.0 h1:5tERPDm3N3lTHWwSAK1KsGLc8/oi6HtjvLvrP21oZMM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.111.0/go.mod h1:J1NJse8mJrVz3HeMoSRH3qAU5/y0cBRVf1Acl/lWVz8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0 h1:QhEwQTGTXitMPbmyloNfLVz1r9YzZ8izJUJivI8obzs=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0/go.mod h1:I7nEkR7TDPFw162jYtPJZVevkniQfQ0FLIFuu2RGK3A=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 h1:Hh3Lt6GIw/jMfCSJ5XjBoZRmjZ1pbJJu6Xi7WrDTUi0=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0/go.mod h1:rQ9lQhijXIJIT5UGuwiKoEcWW6bdWJ4fnO+PndfuYEw=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.111.0 h1:AviHrU5O4Dho+/4Jb4zQ4A3gYAxBhy3RwYQuZY8bXkM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.111.0/go.mod h1:0gILoAExLgqNNcSsLxIRPUlLfDP+OKbDk3cTpB3l73Y=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.111.0 h1:AFzcAfNereWXW8SP5rPtslxv8kNo3LCnnCjUzl7ZCVM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.111.0/go.mod h1:fEtKy/bUTeRKDblbFM9IyIA/QjhepmPs36TtjO1N7mo=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.111.0 h1:jKLX/Ojg8bcHtUaoS585CMbvNJdEFHZjdx233SRdf3s=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.111.0/go.mod h1:Sw1fPP1MkfGFoq1gnbLap5hdH1aoRUCVF5nrLymHa90=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.111.0 h1:yBslPtBsJq0Ftf0w+wEV1NHMZhpqFm9dh5z7IlrmVBI=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.111.0/go.mod h1:5gQLQLJ4AuMKnffhE1dBs86TAJeF52x7FVwibZ7o4YY=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.111.0 h1:kKfYR5GCVeLfkjZWMYZtnvv7NqKY9M1NaZUKVXye+2A=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.111.0/go.mod h1:tgQHCvogGlsnxQgtyhy+OwvBF4FDmK8dPlxs6nahdWs=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.111.0 h1:g9U+7hjEm1yUgaO1rJxstfLW7aEeo3S1tUyyvMlf7A8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.111.0/go.mod h1:tL9m9RF+SGLi80ai1SAy1S/o60kedifzjy0gtGQsnmY=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.111.0 h1:WUvjZUaII7vSxGqRZAKYLiBY4yIZuZHiUYNmMktcAgA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.111.0/go.mod h1:TJzFbof2CdM6/VJgOzNssq5Pe+ewGizrha4QfOK4bwA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.111.0 h1:lkLEZTKVjTVRJlyLPlZbS5JPCJQXT+eRo25WM2Jirk8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.111.0/go.mod h1:NNkYGRH4ADBR7XSrto2bP2TIZlVJsBSyNMtsjpWUfio=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.111.0 h1:6AeBTgAQGW/0q7c2UBAUTqu5+Zq/tivzrcYEJQQrOB4=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.111.0/go.mod h1:iufc35mK+M7hc5Z7BzVE3DGH8E6eJjgeyU78CKUVJDQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.111.0 h1:7oG2+U/9dlbz0T3uvjt71eoY6vpLrnkswm/aLQi9zBw=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.111.0/go.mod h1:DXg1nWKUfkudDIYg3PB62EZH/DcHzOC22QB85TOE3BA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.111.0 h1:0MJmp4O7KUQOUmQYJEGNgtf30Nhx/3nLMn0jnU4Klhw=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.111.0/go.mod h1:4PYgwpscyZUUdQVLsd7dh+LXtm1QbWCvU47P3G/7tLg=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.111.0 h1:W0SthymNSB2fzGuY2KUib6EVyj/uGO3hJvaM6nW0evE=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.111.0/go.mod h1:GQHN6IbBsaGmMJIOQcqA7RXiJi55rXldP3di5YJ1IYA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.111.0 h1:Ld/1EUAQ6z3CirSyf4A8waHzUAZbMPrDOno+7tb0vKM=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.111.0/go.mod h1:wAOT1iGOOTPTw2ysr0DW2Wrfi0/TECVgiGByRQfFiV4=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0 h1:kUUO8VNv/d9Tpx0NvOsRnUsz/JvZ8SWRnK+vT0cNjuU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0/go.mod h1:SstR8PglIFBVGCZHS69bwJGl6TaCQQ5aLSEoas/8SRA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.111.0 h1:RSbk3ty1D9zeBC/elcqVdJoZjpAa331Wha99yNHnH6w=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.111.0/go.mod h1:iDBwbN0by4Y75X6j5PuRoJL5MpoaDv0l7s8dHFQHJPU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.111.0 h1:7DqvnAOXIPv6PEKA347VXACc07E1utEWcjuxsY4YOXA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.111.0/go.mod h1:6hlplIB2LiSciMabYB5IpwrBn3Hl/P8JakNm0uNkVug=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0 h1:TnAhTFTwmJzFq6vVcf57lnRzAp+rNx5tEyrMudtDGsc=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0/go.mod h1:l0CUp7vTH+Wv0tF5PYaHpPn1dLiVuMRAMqbBgXFpz54=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.111.0 h1:c8dHCbDk8RNYC8x2Go+yatwQCK8zgk3i6FmT3p0JJec=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.111.0/go.mod h1:c1ZBGwAPwSzJqWFnyAygLCbFO1GcMzpiwwqyfvYL1IU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.111.0 h1:BCev4nJfHH2u9AsWFfxR1o1Vt5HoW9myN4jaktZInRA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.111.0/go.mod h1:xJ8w6JN/tfRpUXTU6jx/bYmTIcy7OTz7PVFVR/SdqC8=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.111.0 h1:EXgr2fMBJJFnmw3GVRD2fhX3Dqq11g1IoUsrhWfcDn4=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.111.0/go.mod h1:uDdKEduyex67rYq75wyUJC1Wl0QhrqBE09WDa1SznMA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.111.0 h1:9vE440Q98eNLd/estFIDgX1jczzU978yGarFLIMieEU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.111.0/go.mod h1:HK8p16A0OoXqhehCTW3QxgWNeshuIDucGUpGwpw88Og=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.111.0 h1:h5TnZkApRY8MbauD64R2CXKY3SvkjL3+H0xzdee8Yx0=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.111.0/go.mod h1:5sDugbmzTH9mwv+/bHHeDh3GxG2OFcgsBNvAeb5HQS0=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.111.0 h1:G5aPa8QaH114z2l6mLPDsFLnZIp/gEMYnOZ3ePt6Rs8=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.111.0/go.mod h1:cqLqEaIRSmik2ayXSeHjlhQST0FumictNqM30KNwUU8=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.111.0 h1:HY9tieXpiyNQYPVzRR4uzBuAmyWOqwHUcYSKD2a0wxU=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.111.0/go.mod h1:H9N5NbDS3ZIsERRBxZaUoM+F5tM3Uphuw/757T1HM3Y=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.111.0 h1:B6o+FR0+9zPhjX97ABREAlHrqLKJCOodrgh4UoYWvHs=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.111.0/go.mod h1:FCRWxxbf+uIXnz1Q3vsOQkzsw30aA6x9ylaYXhzX8jM=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.111.0 h1:PPivenkwOU4XDjuGhU24d4dF4luu20RZeV+arB53wnA=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.111.0/go.mod h1:+FGkQ0QQbJnNDiXme+GhH1HJybOEaxpmPNBQC/j5DEo=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.111.0 h1:UUo4VOhBMIm1ZTs9xmZO4IFazLHsjUZnPkS0+q7qNL4=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.111.0/go.mod h1:5Ntpnh1KsrfOnPVUWCk/lte4Gma12bJPU8EhBS3AKSE=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.111.0 h1:esaZ1MYqKA6dNkrFk4U0xhX7E2E/Wht4WBYWjTXexbo=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.111.0/go.mod h1:MI0kHmeMSQxG5ZDz3gU3k3KZNRdULzbKdareO7KDGE0=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.111.0 h1:Ce2Ucsp+DOk6OTYsAp8ocQ0NbGPkYXYDlIp/XJeeMck=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.111.0/go.mod h1:k49ONUacPMoCrfUpBJES5MdriG90hvcDKvr9abTItRc=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.111.0 h1:0yYcJnw0vXRhRGHX0BFkN8L1L4xf5NsPVgTVOgjb8k4=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.111.0/go.mod h1:gJHCfB2sgjKPxxBVHNgpL/gI8dSgonj2k4HGeyadxe8=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0 h1:DF+kp5Gtg5B95VncWJb1oOIvf9PGpZ/gxWAHLdIrTEk=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0/go.mod h1:UzFds6+yARb/SHnC93hMeGWKJIDA131nm2dxZW+kTsc=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0 h1:KkHeODEukk2RveIEHvV5dPe06oA2PKAKbpjVZPtCRsQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0/go.mod h1:Ijvd5VMB2tstz3+3BiQy5azewQ31N4fytMFNdo8dLWE=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.111.0 h1:iQmn0AXvTu5VV/YxW5HncVm3gapV6+PA4a5NrJVA2+M=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.111.0/go.mod h1:CVYv1VaSYvQFmeRCDXvq0lfE+MjVuuxGqz8i6OYJGO8=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.111.0 h1:njXy1jWZIB/FGgH5PuD7GEFijog+dIHKkCk0/KK3ie4=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.111.0/go.mod h1:AlwQ/GcxemglIOsq5Hwhhec65zB69KCwLF3ReL9fDXQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.111.0 h1:QTTf31jE0IyIf1fjZSdSCEZXWPQh0RK6wlF6seGcGsc=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.111.0/go.mod h1:YBQziYW63U+PpizgL6FdslXC4qTsB4azIn40ZTQHkkI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.111.0 h1:aExPDvXM72YkrpUr/zzHisB2hthAglMyyMakXSoAjCI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.111.0/go.mod h1:Bbb0kvytjDWKPuvw26nY/+FDqdtUEXNpwqoefS1efrI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0 h1:abeLe2WANVWpnNua41Aa+RTmYYGs0gk1oQRd2/XH7Uo=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0/go.mod h1:Nij85WmJr/+q0HeAvGulEYxFE+PMlhFelPWN6yzCuuw=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.111.0 h1:XIgynRPC/r2x+pc+f2VbtAdBsueejnhA9zBE/bmXL/c=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.111.0/go.mod h1:xVSuQG3cKqWa/jp7wLviJ00CEK0qU0HCp+a6u8G7m9c=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.111.0 h1:ZL2MHUllnnScOOGUw47XxzXTPv2f9FD4iQMmpQ+Y97E=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.111.0/go.mod h1:ethl7HjfN9VGHPWDrfiLAYHPrfUAYxk66tZT5841Uq8=
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.113.0 h1:f3PXc+bgwGtJIlWwtOXDYYNBKOyabhkLl6Q3j/Rc1ow=
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.113.0/go.mod h1:Ix1L1Godr9TS2DvHWSz/+0RBPa/D34azSvk/xU3dlGs=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.113.0 h1:sYEmpMeBGMSoy8j1GSEkBO5GM578Qtq3QtmVcsYn51s=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.113.0/go.mod h1:ULlXfiJdXw0ZzUGXLToQvGb1gQuIGC2BjtfyTnw3QtE=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.113.0 h1:QTCu/YoA37p2Kf81Bc/h5TM70K8O+E/gMBc9vCkJrUQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.113.0/go.mod h1:f3Lwdfnyzf7IY/gFXiRnSPMWGLZv17r+GJuTwnZkQL8=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0 h1:Aej9sL1v25Xf8AfM1fyRluBLV5g5+40GnagCb0/UJfY=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0/go.mod h1:QiXedkQif06vbjtVgnmmrHOunLUoLLAf10uaA/qKgts=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.113.0 h1:DLVAun5qoedOzWZ1+yoZRuGj0RonhrGAqdAOO7k6k+A=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.113.0/go.mod h1:betAqGGfDFb8SLyMYBwH9BQyB9wzxWOWXXC/Ht6/kas=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.113.0 h1:Z6Y34pWmm/be0D5QCirBLEoMf7K9ObRPkMMD8bt4Ce0=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.113.0/go.mod h1:LZ3Wbuyz2MNNAj3bT9u7QUt21glx2FWE26b0EjWKWnc=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.113.0 h1:hJfn9iDpcmaeYCBJvwAhmH4prq2Rdr+hWizEIKWaXmg=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.113.0/go.mod h1:tImtwJ0mSfqoPycKMDGFZcVBGi+8KnBTmBSMHsGSTkU=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.113.0 h1:Z4gWWk5N3ZBJlAx0hRm2sDUxlW8qK7dVoRbWMUnKiOM=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.113.0/go.mod h1:FB+Xi0xKwEqTCshu2SGykG2LXRvg+5ZYR3jymz6+Mtw=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.113.0 h1:+kM285dDDP69EfG6lB+A8nO98wtYrXT/afxlIzk5+IU=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.113.0/go.mod h1:X6/2QgHXQ73vvs1C5LEMyifUknLa71E27hUcbTY5vRo=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.113.0 h1:9xbLur+qeUwlKDrx9LGI9fvypussD2E00q6QFkkGpGo=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.113.0/go.mod h1:l58b3KahydKLOzt7S0s0NYBYH9Nm8tZ4w/GIVvOLCWU=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0 h1:hc407moydGsK9FfAxjP3Tw+akhmKO8PfaH18II3N7Q4=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0/go.mod h1:+1IJOoUqBzghufMZDSMhKzs1UOi39h8pMFDxWm/k1k4=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.113.0 h1:ERdOiTmsDruI/s5oEgN45NsZW2roWXmO0u2aceR4GuM=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.113.0/go.mod h1:RkClsQhl8hdAg874Ot4kaG92s+6dW0Dvlt5HRxhsavc=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.113.0 h1:qudJNiKFfxly/lPyfdZNwnT6OKCzRFw0BI0E5CI6WwU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.113.0/go.mod h1:eHVWQ484ohG4ZjaV8KTej3CMVEPh0w6zBXfi+qqvyGw=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.113.0 h1:7A8MgFPYRQWq1RkFBktq01CW+eTYhiGML0IxQNv2uaM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.113.0/go.mod h1:E1pc7mDXH+5s7RyXw291h8lz2dhzPzaDrAHqP1Lawvw=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.113.0 h1:EZ/ZNsovNcQq+wwAbTAWNY+6BHnv24NxvVoC6eYmtg8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.113.0/go.mod h1:u21dEQ9yQ0JyLMSrKLWWzHG/lHSlteNfa/EQ7Vqcle4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.113.0 h1:462BO6mxAJKJdlqxs3swj9RtebQNeHXp2g7IK/N7+Zc=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.113.0/go.mod h1:aFfi8Vz+pIYXYxrx9rDP2Rhduac7mrjUYEAI/0GUIl4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.113.0 h1:SjzsWZqrxUoRWvXzwrbjQCnoJRJApJVCCqjrtflapMM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.113.0/go.mod h1:sYIh0S63ztcL2q9gEKhvviDQ5caH1sFE1oeFRDQOQ6A=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.113.0 h1:E/D5TwJyKN19p1FQ0XD5r5G1uH9NH/HVAM0e1hFMwFU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.113.0/go.mod h1:FcClDm9XVV5tzUDzmH2Mhe6TfYiZ/3GSAQITnuCjZgg=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.113.0 h1:ZQ7HYLrsbbfrIYybOVDG4d1IS4PfxuZEll7lvLmcYDs=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.113.0/go.mod h1:2/6/eY8Uvg+NfYDsAbND96A4u5q4UjcDlBJolYcj6jE=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.113.0 h1:BidrOROxYyacsUzNJyPZdvuX9VpbmFnSJXAt0yz6cXU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.113.0/go.mod h1:TM5DUkjqHozcpjCX36f7cDv6Rv+J8ysZ52zCYAEQZCk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.113.0 h1:V9CRl77lPG2xFPpnRf1QLiePo7FZngt+vw6M2KLdRMU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.113.0/go.mod h1:zL+Msnlb1TEObHQ2RNnPKbVr3GhSdyI2ZqGtiSxg2/E=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.113.0 h1:SVvBEMYFwb+vq/bNg9TVHNCaiFYb79B8Ce2z0/sWBgc=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.113.0/go.mod h1:lRfw7YDJE82nmdTO14Sk5rzkFJPHyH1iAnWIkjyQgQk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.113.0 h1:gMaV3vZTFJU/B/g/2kKjbHn+LcIIsN4MhGSHi6/ZaFk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.113.0/go.mod h1:iES2YMgH43z6KdlWnTWiZwWY3cyAL/GJOzCEbD7nGkI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.113.0 h1:wKyHS2Vly/qhEEKYsKtRqprZko9hZd+jtmn3TAMrZZU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.113.0/go.mod h1:lFOHzTWx4ozV2x/vRWBgu7gC0rkkX6EMdQkyIxLL2zI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.113.0 h1:T3KHOKgNbWKD6gx5R2D4p3tg+0p4lIrxyf+4iy0Yxts=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.113.0/go.mod h1:VqC1uSDXtgrEuqyyJuYp7G8Sr2FY2QtP3pN9a7cTueA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.113.0 h1:5YU2trp7n56EyDS9dEyY1UxyaW6wxB4KiyKoyjDYooo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.113.0/go.mod h1:EBtBK1lE/HMUz51cafBLlJAXZ/2ZDRCV4C+rT04fMYM=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.113.0 h1:e2WOkaj5AWPaKTU4l+GEXGrEUbrAhQPQ7zLUdnXLGX8=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.113.0/go.mod h1:x+DR/o7xddbdhpQP2QKBJkPUdrj2tl/uR1OJ/sqlrWc=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.113.0 h1:GERkhEtH3Uk8CMBzFoBmMD7fBfcrtIM9hopbQqzdvNs=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.113.0/go.mod h1:+VbefhdCgKiTXsIU6sQj9L96Ow53a8EMcUW6EMt3zTA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.113.0 h1:Ux4k3jMTufk4HZ4RNYrqLxIt6wKEeOFAndzGWBjiUqs=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.113.0/go.mod h1:GWsSVmzpKZucOefuqqvKEUbnqGRV9OCSX2vzTjC/sbI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.113.0 h1:n44G0Quw+OQMZ+ELOo/Aw1qcwVu7LXae8GBVjVSE+HQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.113.0/go.mod h1:6dxGDpWsWnzC5UK3RhgfdXRC+3c8RLCO+cC+RiS+jIU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.113.0 h1:Qy0MqQQKmW9wrfduM794WKg4qjTobIdj5HDHW5FZ/P8=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.113.0/go.mod h1:X25Nhlw6xhuNSd/C0FeEwmD4PGmcXDl7pa2jR0UREkU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.113.0 h1:G8w+wg4nnqBqe297fBWnjJ5Tg2OYDVEMsdWA9/3ozxQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.113.0/go.mod h1:m3hDVsXPQzQfeji3+hn7NYJPHDRlHhQRNd5T7N5wZqc=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.113.0 h1:mFYOvag34kGXceVj29k0ZpBUyjEX7VZq+KctUSfNiG0=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.113.0/go.mod h1:54P38b2i1CgHvZLxD3EAzVccqreamGEz2U4pqy9DuHw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.113.0 h1:vKtNSM3VQBTJx1ecf+I1iqn4kj7fKif1SpBLQ+numf8=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.113.0/go.mod h1:Iw3ndTvDCbmN6uugOfIqmfb1pgCWTYg+tXXS2rr3QJo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.113.0 h1:XzI7y1bC29bnJ9VgyA0JCws0e/rIyt7yteT5gGLe6nw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.113.0/go.mod h1:OxdhzDFd7/cRck6LeWLF2TUC/QhwoJSUlw35NuVbvzA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.113.0 h1:4fkpWb4wG1RWr9C5M8IbxcBwq9qGlqf5bbp3bxRXOzA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.113.0/go.mod h1:yAl+wNAt18sGPizvODIHue12echxjpFL2SEQvUksN5M=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.113.0 h1:z8DT+vL/NfRN2hpacLIqtCMcInFrM01CY9LtoFJq+jQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.113.0/go.mod h1:U0wBuG6Jz+DBzcPNCmRVZaZTXqaKC+RYo4eJiSKJwwk=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.113.0 h1:VHt8tWPRPzPjl2AzO6tAd86yboX1UDDFkBm6oDVNAoI=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.113.0/go.mod h1:r5DetWqG2vclbHNAYp4a+Kg5i7ZAfcRFez5bliTLDr0=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.113.0 h1:+eYxV9vp6u8fKM+9acEJYGUa3SD1vJF776c/haougNQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.113.0/go.mod h1:xSVeb2A5wmIuJ9Vak9UwPCP/yN1SDd+pBKfYHROW6YE=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.113.0 h1:WN/zA6sCT4VzCA9CpRTGj6wiu17vIFozm/0yxNwKeGs=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.113.0/go.mod h1:sqWPNepjCX0+Ye++N9HwJjJ7KUAOkn4/ML/2GzrZquQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.113.0 h1:bloEe7hK+H95zQ5WusQwQCILjudl6ljyR4kO95+Ocuo=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.113.0/go.mod h1:/vYbT5YZ/SyKGtbBtKCI00sGUk3Xd90A2hT5iSWP8Dk=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.113.0 h1:zDScLkNf/llyiH1cjpVv5PhJAT5AcHIXCB35zW5jsbM=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.113.0/go.mod h1:S+GR7FZJYtFBnbjgD737QImuvm8d4+PBccpI0Xrda4E=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.113.0 h1:5cEQNFwYAn8PJ66l88lGLMSz9TYWiIUFayDwAtpJumw=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.113.0/go.mod h1:uSskqW6AAEHg/2yZ6hNo9V0OfQmM/wHP9lSNr2OSUU4=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.113.0 h1:Syf4U5GrSl2fOGXBAChHrdSvMRBhi7BFiDwKbFkNo/8=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.113.0/go.mod h1:Q9shex5tQOoK4FeVx0NvYkwu18hCPFlRnwqqQzLfbpo=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.113.0 h1:l6UiNM2jEs+kBmsNt8qg2dEZpUVc8CLsvYksa9CZRDs=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.113.0/go.mod h1:Ky2VVQfIqv9ifden+amJv3sTi3Y/9u6rNMtq8cnVECs=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.113.0 h1:vgYhhliqQ6WUy5b1BE0ILJQKTweaLDPI5l/bUIunqLo=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.113.0/go.mod h1:UG/8zqyqbdN0HHkiWC7GZW4wFL4GIyRtsshc1RY8bGo=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.113.0 h1:c4vPI/PrRPnueoaKJKzBztsASIn5yZ7BT7uc4PHR39E=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.113.0/go.mod h1:MR9VrhTtPazVAEWR/RPQln1i0Cmrc/6e2+zRpI/gwhM=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.113.0 h1:F4rPll42bwVC2tuoyG0f0LmjfoBMc5eNT0j7iDtbDXk=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.113.0/go.mod h1:GReM8iGTmB0zIH9A2vT3Ki5xP690A9RVycxB65dao38=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.113.0 h1:9b7iQMFbA1rG9DVkepxN9qilmEYG5VaVb+meTsVEKBU=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.113.0/go.mod h1:urzOE2U+Ax4Zho6VYFGOC/x1B4npKNDB6GLJ/F9k56I=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.113.0 h1:oNEV5G5m59ekwho7BaiBdUbqWMAsneE6IFkVkiZY4Yg=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.113.0/go.mod h1:tULSPMh5LZ9UJZa5QgAd7okycfM0x28AoWhtRt7DNvw=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.113.0 h1:qPUFbh7d9Ddiyky8F4by+KRUUksqMiO+gFDXGkaxevw=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.113.0/go.mod h1:e+cVHDHttCojcC8iyBcDFtfK3JWZlqaDc+WCTl5sEdo=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0 h1:frNZmJqTjdGO4vkxM2LN5URbddpD+R8taOCtDz3JJiQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0/go.mod h1:qFJOAhv+agSMdJSmsIW4QDsTWIdWo8NRZvY3OV2iWV8=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.113.0 h1:tIJu6tCPiZKK6FiBfn2ritlwMSrjwS4iNTI0u02J/ns=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.113.0/go.mod h1:cI/ZaTpiY6QDTihTwSKXgtsWXwSPr4Bpb95CjA1LO5Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.113.0 h1:Azx7wP6Me2iXr6h2bTqbRjtxB6HnXN9QpYECLu/eQ8I=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.113.0/go.mod h1:KezOwO7COPCsZnE8ECCrWvAywUhTZMYtJx7H36JguoQ=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -753,6 +749,8 @@ github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY=
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -775,8 +773,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
-github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -788,8 +786,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
-github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -804,8 +802,8 @@ github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -820,14 +818,14 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
-github.com/shirou/gopsutil/v4 v4.24.9 h1:KIV+/HaHD5ka5f570RZq+2SaeFsb/pq+fp2DGNWYoOI=
-github.com/shirou/gopsutil/v4 v4.24.9/go.mod h1:3fkaHNeYsUFCGZ8+9vZVWtbyM1k2eRnlL+bWO8Bxa/Q=
+github.com/shirou/gopsutil/v4 v4.24.10 h1:7VOzPtfw/5YDU+jLEoBwXwxJbQetULywoSV4RYY7HkM=
+github.com/shirou/gopsutil/v4 v4.24.10/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY=
github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI=
-github.com/signalfx/sapm-proto v0.14.0 h1:KWh3I5E4EkelB19aP1/54Ik8khSioC/RVRW/riOfRGg=
-github.com/signalfx/sapm-proto v0.14.0/go.mod h1:Km6PskZh966cqNoUn3AmRyGRix5VfwnxVBvn2vjRC9U=
+github.com/signalfx/sapm-proto v0.16.0 h1:E8W+awZBl3nmpDTdbPK8Uwla9FdSCWpZChR3p+7bzw0=
+github.com/signalfx/sapm-proto v0.16.0/go.mod h1:7VTAIoYIgkAK+j6w3l4Aici+EYySGAmXCK0rfD2OZkU=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -870,8 +868,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
-github.com/testcontainers/testcontainers-go v0.31.0 h1:W0VwIhcEVhRflwL9as3dhY6jXjVCA27AkmbnZ+UTh3U=
-github.com/testcontainers/testcontainers-go v0.31.0/go.mod h1:D2lAoA0zUFiSY+eAflqK5mcUx/A5hrrORaEQrd0SefI=
+github.com/testcontainers/testcontainers-go v0.34.0 h1:5fbgF0vIN5u+nD3IWabQwRybuB4GY8G2HHgCkbMzMHo=
+github.com/testcontainers/testcontainers-go v0.34.0/go.mod h1:6P/kMkQe8yqPHfPWNulFGdFHTD8HB2vLq/231xY2iPQ=
github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo=
github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
@@ -882,8 +880,8 @@ github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I
github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8=
github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4=
github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E=
-github.com/tinylib/msgp v1.2.2 h1:iHiBE1tJQwFI740SPEPkGE8cfhNfrqOYRlH450BnC/4=
-github.com/tinylib/msgp v1.2.2/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0=
+github.com/tinylib/msgp v1.2.3 h1:6ryR/GnmkqptS/HSe6JylgoKASyBKefBQnvIesnyiV4=
+github.com/tinylib/msgp v1.2.3/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
@@ -912,162 +910,180 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector v0.111.0 h1:D3LJTYrrK2ac94E2PXPSbVkArqxbklbCLsE4MAJQdRo=
-go.opentelemetry.io/collector v0.111.0/go.mod h1:eZi4Z1DmHy+sVqbUI8dZNvhrH7HZIlX+0AKorOtv6nE=
-go.opentelemetry.io/collector/client v1.17.0 h1:eJB4r4nPY0WrQ6IQEEbOPCOfQU7N15yzZud9y5fKfms=
-go.opentelemetry.io/collector/client v1.17.0/go.mod h1:egG3tOG68zvC04hgl6cW2H/oWCUCCdDWtL4WpbcSUys=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/component/componentprofiles v0.111.0 h1:yT3Sa833G9GMiXkAOuYi30afd/5vTmDQpZo6+X/XjXM=
-go.opentelemetry.io/collector/component/componentprofiles v0.111.0/go.mod h1:v9cm6ndumcbCSqZDBs0vRReRW7KSYax1RZVhs/CiZCo=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY=
-go.opentelemetry.io/collector/config/configauth v0.111.0 h1:0CcgX4TzK5iu2YtryIu3al8lNI+9fqjbGoyvAFk9ZCw=
-go.opentelemetry.io/collector/config/configauth v0.111.0/go.mod h1:5oyYNL3gnYMYNdNsEjFvA2Tdc1yjG8L+HQFIjPo6kK8=
-go.opentelemetry.io/collector/config/configcompression v1.17.0 h1:5CzLHTPOgHaKod1ZQLYs0o7GZDBhdsLQRm8Lcbo79vU=
-go.opentelemetry.io/collector/config/configcompression v1.17.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
-go.opentelemetry.io/collector/config/configgrpc v0.111.0 h1:XwHBWCP0m/d6YZ0VZltzVvnz5hDB9ik7sPRjJIdmjUk=
-go.opentelemetry.io/collector/config/configgrpc v0.111.0/go.mod h1:K9OLwZM8dGNL1Jul/FGxlRsnLd1umgDyA+yxq2BNXUs=
-go.opentelemetry.io/collector/config/confighttp v0.111.0 h1:nZJFHKYYeCasyhhFC71iZf6GAs6pfFcNOga6b8+lFvc=
-go.opentelemetry.io/collector/config/confighttp v0.111.0/go.mod h1:heE5JjcLDiH8fMULf55QL2oI9+8Ct58Vq/QfP7TV684=
-go.opentelemetry.io/collector/config/confignet v1.17.0 h1:cBmDdiPuIVrHiecgCKyXhRYmDOz9Do5IM7O1JhbB3es=
-go.opentelemetry.io/collector/config/confignet v1.17.0/go.mod h1:o3v4joAEjvLwntqexg5ixMqRrU1+Vst+jWuCUaBNgOg=
-go.opentelemetry.io/collector/config/configopaque v1.17.0 h1:wHhUgJhmDgNd6M7GW8IU5HjWi/pNmBEe9jBhavoR45g=
-go.opentelemetry.io/collector/config/configopaque v1.17.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
-go.opentelemetry.io/collector/config/configretry v1.17.0 h1:9GaiNKgUDx5by+A0aHKojw1BilHSK+8wq2LOmnynN00=
-go.opentelemetry.io/collector/config/configretry v1.17.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
-go.opentelemetry.io/collector/config/configtls v1.17.0 h1:5DPgmBgpKEopLGmkjaihZHVA/8yH0LGoOrUZlb86T0Q=
-go.opentelemetry.io/collector/config/configtls v1.17.0/go.mod h1:xUV5/xAHJbwrCuT2rGurBGSUqyFFAVVBcQ5DJAENeCc=
-go.opentelemetry.io/collector/config/internal v0.111.0 h1:HTrN9xCpX42xlyDskWbhA/2NkSjMasxNEuGkmjjq7Q8=
-go.opentelemetry.io/collector/config/internal v0.111.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
-go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM=
-go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k=
-go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0 h1:KH0ABOBfSPp5XZtHkoXeI9wKoOD9B0eN6TDo08SwN/c=
-go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0/go.mod h1:jyFbV9hLrYJf2zNjqcpzkzB6zmPj/Ohr+S+vmPuxyMY=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0 h1:UyMO2ddtO7GKuFjrkR51IxmeBuRJrb1KKatu60oosxI=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0/go.mod h1:SCJ8zvuuaOwQJk+zI87XSuc+HbquP2tsYb9aPlfeeRg=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0 h1:R/U0uWAyppNrxvF+piqhnhcrPSNz3wnwHyEIRCbrmh0=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0/go.mod h1:3mtUk7wwDQyPUsHtCOLi2v0uSZWfC00BhOhqHs4CWs4=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0 h1:FtKwwHS8NSNJWrhE7JsFlYhe+2GojENfOQbhQMSTyRo=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0/go.mod h1:9/R8ucfVQEEEHMv9b7M6rSB8nF2k+MfIO93vbDEsaMU=
-go.opentelemetry.io/collector/connector v0.111.0 h1:dOaJRO27LyX4ZnkZA51namo2V5idRWvWoMVf4b7obro=
-go.opentelemetry.io/collector/connector v0.111.0/go.mod h1:gPwxA1SK+uraSTpX20MG/cNc+axhkBm8+B6z6hh6hYg=
-go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 h1:tJ4+hcWRhknw+cRw6d6dI4CyX3/puqnd1Rg9+mWdwHU=
-go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0/go.mod h1:LdfE8hNYcEb+fI5kZp4w3ZGlTLFAmvHAPtTZxS6TZ38=
-go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
-go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
-go.opentelemetry.io/collector/exporter v0.111.0 h1:NpiP6xXGOmSi59RlB5gGTB+PtCLldVeK3vCQBJPW0sU=
-go.opentelemetry.io/collector/exporter v0.111.0/go.mod h1:FjO80zGWZjqXil8vM1MS8gyxxzZ29WmChTNV2y9xjHo=
-go.opentelemetry.io/collector/exporter/debugexporter v0.111.0 h1:KiypGuW+JG1gV9l6pvSEIMKwn+MLJn0Ol62HMe5ytr4=
-go.opentelemetry.io/collector/exporter/debugexporter v0.111.0/go.mod h1:7ihw3KDcvrY5kXIRNxB64Pz6kguf5Q0x9mJAvbBLT5Y=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 h1:fpIRPzqsaEtbVip/wsU6h/GMGISo7UjiiYV61MOMEpQ=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0/go.mod h1:NGUTQd1fminFnw289fVQFN4dxdyedK4GTTrJUc9gCtw=
-go.opentelemetry.io/collector/exporter/nopexporter v0.111.0 h1:CRgqzloeVAZDnjJ+ayfqOcQZ6uREf6O65NCHV4LqGcY=
-go.opentelemetry.io/collector/exporter/nopexporter v0.111.0/go.mod h1:Mw/hi2MVqUt3QEmxjOWcomICZi7Jx/31tCmr5l0T2+o=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0 h1:eOyd1InTuymfIP4oMzJki28JjpGQzOEK6Y0YlI6pwgA=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0/go.mod h1:nOUveQ4KWFqlCA6b0L5DXMosZCcNtit8abEuLHwBaUM=
-go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0 h1:e7UHbu515LfkFKHdXdOvz0gQP6jXD+uuoKs1PRXHEw0=
-go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0/go.mod h1:0+TSVnAEw9hyF34b0eu36IFVLpAgpxOugAI2ZgNPX18=
-go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8=
-go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI=
-go.opentelemetry.io/collector/extension/auth v0.111.0 h1:V9DfnMsKdVfsQMeGR5H/nAYHlZnr1Td75kkJOKbCevk=
-go.opentelemetry.io/collector/extension/auth v0.111.0/go.mod h1:4O5JQqEdAWuq4giicIy6DKlgkKTC0qgVEJm44RhviZY=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 h1:kUJSFjm6IQ6nmcJlfSFPvcEO/XeOP9gJY0Qz9O98DKg=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0/go.mod h1:qQGvl8Kz2W8b7QywtE8GNqWJMDBo47cjoiIXYuE+/zM=
-go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 h1:Ps2/2TUbAkxgZu1YxSxDweZDLJx5x7CyNKCINZkLFtY=
-go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0/go.mod h1:q4kBSWsOX62hAp7si+Y0Y0ZXWyCpXjiRuWWz7IL/MDI=
-go.opentelemetry.io/collector/extension/zpagesextension v0.111.0 h1:X+YXkJ3kX8c3xN/Mfiqc/gKB7NaQnG4Cge9R60lKOyw=
-go.opentelemetry.io/collector/extension/zpagesextension v0.111.0/go.mod h1:v5u5Ots6HgbhKsvRXB+SF9cmVTgkUATNiejHbpsa0rY=
-go.opentelemetry.io/collector/featuregate v1.17.0 h1:vpfXyWe7DFqCsDArsR9rAKKtVpt72PKjzjeqPegViws=
-go.opentelemetry.io/collector/featuregate v1.17.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
-go.opentelemetry.io/collector/filter v0.111.0 h1:OUE1wKch/C5AfF/TUpMWbKTSYYvSUlNPHADD0c8Also=
-go.opentelemetry.io/collector/filter v0.111.0/go.mod h1:74Acew42eexKiuLu3tVehyMK4b5XJPWXoJyNjK2FM+U=
-go.opentelemetry.io/collector/internal/globalgates v0.111.0 h1:pPf/U401i/bEJ8ucbYMyqOdkujyZ92Gbm6RFkJrDvBc=
-go.opentelemetry.io/collector/internal/globalgates v0.111.0/go.mod h1:HqIBKc8J5Vccn93gkN1uaVK42VbVsuVyjmo5b1MORZo=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8=
-go.opentelemetry.io/collector/otelcol v0.111.0 h1:RcS1/BDsEBGdI4YjosdElxYwsA2tTtiYEuWjEF0p8vk=
-go.opentelemetry.io/collector/otelcol v0.111.0/go.mod h1:B/ri/CwsW7zeLXkCcB3XtarxjJ80eIC+z8guGhFFpis=
-go.opentelemetry.io/collector/otelcol/otelcoltest v0.111.0 h1:kiXvbIR1K8Tcv10ffaA9MvcPoGpm6uitaXzfhDZnV3o=
-go.opentelemetry.io/collector/otelcol/otelcoltest v0.111.0/go.mod h1:7jwDuhMkglGVSyJT6CQ1vE7A6fjYTvbap7/QVl3P8kQ=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
-go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
-go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
-go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g=
-go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo=
-go.opentelemetry.io/collector/processor/batchprocessor v0.111.0 h1:JoBjX0LjmQ3n22o54sxAN9T6sgxumBLDqq0RElvYAVc=
-go.opentelemetry.io/collector/processor/batchprocessor v0.111.0/go.mod h1:8Dw89aInFh4dX3A0iyIcpbQ1A/8hVWtxjrJKyAOb9TQ=
-go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.111.0 h1:Y5gMjXn6bbMiOgiGSaWvOFb4jbCVraG1/GjQsJjCEMI=
-go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.111.0/go.mod h1:s42Gm7LMqietFs0Cpl+ma2sEYZP3RWHIlXlWimGW2cQ=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ=
-go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6tq/2qvbMIKlhG3s=
-go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g=
-go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0 h1:JWg6F//9AH34KVL1RkRVpcyJpbzIWMtpCLxggeo3gsY=
-go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0/go.mod h1:FpiGrlkIhMh9gNzaw29m5zhSkRRruZnwB2RyGI0yCsw=
-go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0 h1:VsQ55DvHvjYop+wbpY6qCSF0cfoMNMZEd0pANa5l+9Y=
-go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0/go.mod h1:/zUX2GHa7CIeqGRl+hpQk3zQ1QCaUpBK42XGqrXAbzQ=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM=
-go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
-go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
-go.opentelemetry.io/collector/service v0.111.0 h1:6yGjjbZvlYbir+vzi/9ACF965m8i96ScPTjpVvki3ms=
-go.opentelemetry.io/collector/service v0.111.0/go.mod h1:tti8TAosPuRj51/bbrSvf6OIJoSyTkywEvTdY/fAuwY=
+go.opentelemetry.io/collector v0.113.0 h1:dBuo2/OKBhoMCR86W4fFJLXGQ0gJfKRmi65AZwFkU2I=
+go.opentelemetry.io/collector v0.113.0/go.mod h1:XbjD4Yw9LunLo3IJu3ZZytNZ0drEVznxw1Z14Ujlw3s=
+go.opentelemetry.io/collector/client v1.19.0 h1:TUal8WV1agTrZStgE7BJ8ZC0IHLGtrfgO9ogU9t1mv8=
+go.opentelemetry.io/collector/client v1.19.0/go.mod h1:jgiXMEM6l8L2QEyf2I/M47Zd8+G7e4z+6H8q5SkHOlQ=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0 h1:iNlUi+vDo7OqQwg4UOjfqqY3Xram4SkdcSZAyxt6QfE=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0/go.mod h1:NF/hVnP0kcsEqV9x14yMxN9GoehuOaVgMpO+s2KE+28=
+go.opentelemetry.io/collector/config/configauth v0.113.0 h1:CBz43fGpN41MwLdwe3mw/XVSIDvGRMT8aaaPuqKukTU=
+go.opentelemetry.io/collector/config/configauth v0.113.0/go.mod h1:Q8SlxrIvL3FJO51hXa4n9ARvox04lK8mmpjf4b3UNAU=
+go.opentelemetry.io/collector/config/configcompression v1.19.0 h1:bTSjTLhnPXX1NSFM6GzguEM/NBe8QUPsXHc9kMOAJzE=
+go.opentelemetry.io/collector/config/configcompression v1.19.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
+go.opentelemetry.io/collector/config/configgrpc v0.113.0 h1:rNbRd033JlIeU+TH+3bEt4OwRlEwrktWdf6V+VUJUPk=
+go.opentelemetry.io/collector/config/configgrpc v0.113.0/go.mod h1:InXxPUj1oxJ57Sl954d2tQxXTgVHhfppFYjMwGjQukg=
+go.opentelemetry.io/collector/config/confighttp v0.113.0 h1:a6iO0y1ZM5CPDvwbryzU+GpqAtAQ3eSfNseoAUogw7c=
+go.opentelemetry.io/collector/config/confighttp v0.113.0/go.mod h1:JZ9EwoiWMIrXt5v+d/q54TeUhPdAoLDimSEqTtddW6E=
+go.opentelemetry.io/collector/config/confignet v1.19.0 h1:gEDTd8zLx4pPpG5///XPRpbYUpvKsuQzDdM5IEULY9w=
+go.opentelemetry.io/collector/config/confignet v1.19.0/go.mod h1:o3v4joAEjvLwntqexg5ixMqRrU1+Vst+jWuCUaBNgOg=
+go.opentelemetry.io/collector/config/configopaque v1.19.0 h1:7uvntQeAAtqCaeiS2dDGrT1wLPhWvDlEsD3SliA/koQ=
+go.opentelemetry.io/collector/config/configopaque v1.19.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
+go.opentelemetry.io/collector/config/configretry v1.19.0 h1:DEg8PXpo4ahMYgMzZZUU2cPcDF4vqowZlvimJ/t9InY=
+go.opentelemetry.io/collector/config/configretry v1.19.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/config/configtls v1.19.0 h1:GQ/cF1hgNqHVBq2oSSrOFXxVCyMDyd5kq4R/RMEbL98=
+go.opentelemetry.io/collector/config/configtls v1.19.0/go.mod h1:1hyqnYB3JqEUlk1ME/s9HYz4oCRcxQCRxsJitFFT/cA=
+go.opentelemetry.io/collector/config/internal v0.113.0 h1:9RAzH8v7ItFT1npHpvP0SvUzBHcZDliCGRo9Spp6v7c=
+go.opentelemetry.io/collector/config/internal v0.113.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
+go.opentelemetry.io/collector/confmap v1.19.0 h1:TQ0lZpAKqgsE0EKk+u4JA+uBbPYeFRmWP3GH43w40CY=
+go.opentelemetry.io/collector/confmap v1.19.0/go.mod h1:GgNu1ElPGmLn9govqIfjaopvdspw4PJ9KeDtWC4E2Q4=
+go.opentelemetry.io/collector/confmap/provider/envprovider v1.19.0 h1:f8O/I5pVRN86Gx5mHekNx92S6fGdOS4VcooRJKWe6Bs=
+go.opentelemetry.io/collector/confmap/provider/envprovider v1.19.0/go.mod h1:AiaW5YW1LD0/WlZuc8eZuZPBH6PA9QqsiAYRX1iC6T0=
+go.opentelemetry.io/collector/confmap/provider/fileprovider v1.19.0 h1:TYwyk4ea3U+5MYcEjrzZAaonBcLlabQu8CZeB7ekAYY=
+go.opentelemetry.io/collector/confmap/provider/fileprovider v1.19.0/go.mod h1:i3mL4OSGI5JM0hnzHujhJK+LDlvO3XrJxBsuclfU/jY=
+go.opentelemetry.io/collector/confmap/provider/httpprovider v1.19.0 h1:a077jcs3DVtaVdmgmCk3x4rRYuTkIqMDsoUc+VICHZk=
+go.opentelemetry.io/collector/confmap/provider/httpprovider v1.19.0/go.mod h1:HjYkzhHbwUacv27nq0JLsslGpbtrXyyfU30Oc72AWLU=
+go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.19.0 h1:oV66DKiEdAt8EMZqGSChK2iEOxjrVaWRhf4OqqmqjbM=
+go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.19.0/go.mod h1:jtNUdO6i1k38BG7vFst+d1jk/N+c419uVR8HB4J0VjI=
+go.opentelemetry.io/collector/connector v0.113.0 h1:ii+s1CjsLxtglqRlFs6tv8UU/uX45dyN9lbTRbR0p8g=
+go.opentelemetry.io/collector/connector v0.113.0/go.mod h1:KmA8eQouTVxVk65Bf6utuMjWovMuOvNVRcYV60CAGtc=
+go.opentelemetry.io/collector/connector/connectorprofiles v0.113.0 h1:yAEKTxVGpBtHrrXeZFrBcVOQkduxCncH0o4hqKrDRyw=
+go.opentelemetry.io/collector/connector/connectorprofiles v0.113.0/go.mod h1:+mwzwIZ1cEK29ByfC38uF8hmFO8Wf9ShT1c756XX+RI=
+go.opentelemetry.io/collector/connector/connectortest v0.113.0 h1:WHekoL0izkrKLVQLv79v0QhqfnXkVcw0sgdF07EqWLM=
+go.opentelemetry.io/collector/connector/connectortest v0.113.0/go.mod h1:KouywNfkxRf+yzbI2pdolzTLkLoCV4ASEI2o2pDt+Cg=
+go.opentelemetry.io/collector/consumer v0.113.0 h1:KJSiK5vSIY9dgPxwKfQ3gOgKtQsqc+7IB7mGhUAL5c8=
+go.opentelemetry.io/collector/consumer v0.113.0/go.mod h1:zHMlXYFaJlZoLCBR6UwWoyXZ/adcO1u2ydqUal3VmYU=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0 h1:Hd2N7n9RKbnKRaVrdw6fPBoQko5zZIgCxwVxkL6SAIE=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0/go.mod h1:o0MAGFdzcr7LFTUQ6iivPPhbVmn2ZVIYm3FPXk2+JUo=
+go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.113.0 h1:2kLIt+6dGmhCd48CWXh3IEon/uW4+c8y81IGCA/h8wE=
+go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.113.0/go.mod h1:/eESy7Ifyf7G6r6WUpEOq2tnfjIJ2QNB2EvZcEu0aWA=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 h1:RftAcQUY5UOfbEK4s16jnORqTx16y9+PxA1lQwt98cQ=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0/go.mod h1:ZuHrQ4pWguh6dw0DgTfcUtdY/T+cnOJJNP6LMbm5Y5A=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0 h1:ua2AjNx3DUA8qElXNkggB4w3VDL/rBKBvryOQkhumH8=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0/go.mod h1:vK8o4ZTZSiG3rVyqxZcCNmT/cvEfx34ig7V65L9+6Rg=
+go.opentelemetry.io/collector/exporter v0.113.0 h1:lDZJ6xfuhyLsT/7lqLhIN/ftA6G+9fuYFtubPFvNDxo=
+go.opentelemetry.io/collector/exporter v0.113.0/go.mod h1:0W4NBf5NjWYxR8oJodmOybgN4O0MLazdJwwHevirvXg=
+go.opentelemetry.io/collector/exporter/debugexporter v0.113.0 h1:iShn3SuSpx78YLgl7fQCJsHLs7z0RDtbN58/Amoy5xc=
+go.opentelemetry.io/collector/exporter/debugexporter v0.113.0/go.mod h1:O1dLnYA81a+ZecBD89vjZSBgkAnhnfXwsmYsE7LP2/s=
+go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.113.0 h1:Auz2vZYReIlyDvJ162OCO8XcV7L2BIbFb5HJWxerc5A=
+go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.113.0/go.mod h1:JQuawcAfDuzNneDF5Ep1CZJ5snsLp6Bh1gZcHhja7yU=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 h1:8bsk3wYYNr+WAM5nZkFjiLYSTH9MsY2tm7nUpMWt3qc=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0/go.mod h1:/HFWF846XePYL/qKDtcEAFgkiGSkLUTaC59A5F48axM=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0 h1:U6cRxjJS7td8iNriUI2QfEdH+Yj60ytyvpmnmKTw0+8=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0/go.mod h1:SRz5jGyAjtNiWwJ93B1+Ndk1p3oFtQsyLw52UGeyRwc=
+go.opentelemetry.io/collector/exporter/nopexporter v0.113.0 h1:DClFr8PNUc+f8fciNK3Sdj+ydCgZRc2zVk+1WCLyPfU=
+go.opentelemetry.io/collector/exporter/nopexporter v0.113.0/go.mod h1:RGn9QUUOldcD19yKyg5e6dBjy/o//RaWGOhkS6azhqo=
+go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0 h1://7diunG5SohqaYfqvHzCtcfrY7y3WQj0vklFYgeNW4=
+go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0/go.mod h1:THF0eq4lA6dYOho53iKFCBOv91HEeISZyep5dXr+fBU=
+go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0 h1:22Srn4V6ogOdi4Bn6eKtKqAidWyjPkYKYDR3Xq91nFY=
+go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0/go.mod h1:BRA54WRyPS9RYDIUEGxxJvxJ/uZ66++bCFPHliDstCQ=
+go.opentelemetry.io/collector/extension v0.113.0 h1:Vp/YSL8ZCkJQrP1lf2Bm5yaTvcp6ROO3AnfuSL3GEXM=
+go.opentelemetry.io/collector/extension v0.113.0/go.mod h1:Pwp0TNqdHeER4V1I6H6oCvrto/riiOAqs3737BWCnjw=
+go.opentelemetry.io/collector/extension/auth v0.113.0 h1:4ggRy1vepOabUiCWfU+6M9P/ftXojMUNAvBpeLihYj8=
+go.opentelemetry.io/collector/extension/auth v0.113.0/go.mod h1:VbvAm2YZAqePkWgwn0m0vBaq3aC49CxPVwHmrJ24aeQ=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 h1:Qq4IaB6bMUrf/bWoPZ5ESWywCt+vDi8I/ChYejIEPcc=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0/go.mod h1:BRmo+A7f06u/rhyLauU/Vogk+QRN0y1j2VVVgMGWrfQ=
+go.opentelemetry.io/collector/extension/extensioncapabilities v0.113.0 h1:GuJzpnrJtsMrKWGmb1VL4EqL6x1HDtZmtvy3yEjth6Y=
+go.opentelemetry.io/collector/extension/extensioncapabilities v0.113.0/go.mod h1:oa72qndu7nAfEeEpDyDi9qLcaSJGIscLc/eeojFADx0=
+go.opentelemetry.io/collector/extension/zpagesextension v0.113.0 h1:b/Clxso9uVwLVYjvRQ1NGBWHpUEZ/++uA5sJbBj0ryo=
+go.opentelemetry.io/collector/extension/zpagesextension v0.113.0/go.mod h1:5csGYy9Ydfy6Hpw3Tod864P6HUEZpA6UiuPJPG3TjSU=
+go.opentelemetry.io/collector/featuregate v1.19.0 h1:ASea2sU+tdpKI3RxIJC/pufDAfwAmrvcQ4EmTHVu0B0=
+go.opentelemetry.io/collector/featuregate v1.19.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
+go.opentelemetry.io/collector/filter v0.113.0 h1:5ODwM8QEOzZq08H8DJilBa4PHieXpBreJVKZ0D2YshA=
+go.opentelemetry.io/collector/filter v0.113.0/go.mod h1:Mh3N6cpVijdamUJj1tAgSU1RG/Ek4FuY2ODKYxKZDtk=
+go.opentelemetry.io/collector/internal/fanoutconsumer v0.113.0 h1:Beu2zAN6/EDXQ6hMFU6FT1BsnU5FXmWNOlfTAhrgbGc=
+go.opentelemetry.io/collector/internal/fanoutconsumer v0.113.0/go.mod h1:WUXbc4L6KJ3SpmsxBgId0OYzRDuS7n274kNpqrgnSmY=
+go.opentelemetry.io/collector/internal/memorylimiter v0.113.0 h1:qe3xZYB4BgSuPDgFMQbcJ5gDy8t+S1vt6pL+OKrdx9E=
+go.opentelemetry.io/collector/internal/memorylimiter v0.113.0/go.mod h1:Eo/XZsFPS1mo0DHnAaVeyPNFn3HKVXB2nog++b3CnRc=
+go.opentelemetry.io/collector/internal/sharedcomponent v0.113.0 h1:a4gT+t+rboCaH70anhu+ZQp9IJ7UjVeZxZJvxTBgCqU=
+go.opentelemetry.io/collector/internal/sharedcomponent v0.113.0/go.mod h1:6WDDyjI4pbtfUmtv/JKLs7OwieEEvaDVb3Zcc4oA9Vg=
+go.opentelemetry.io/collector/otelcol v0.113.0 h1:t32gA8Pg9lsqYQml4mgvHdqFSsjvPZMvGywi0zXQYjk=
+go.opentelemetry.io/collector/otelcol v0.113.0/go.mod h1:PV6pDLPEaulRs3ceWYNEDuG5100F35I5VzeC2ekT/vY=
+go.opentelemetry.io/collector/otelcol/otelcoltest v0.113.0 h1:bfu9oQQbO6KEcpgh7muc1ixsGQs+qFWwi9LyabGILqw=
+go.opentelemetry.io/collector/otelcol/otelcoltest v0.113.0/go.mod h1:0bag/J2REbnIKKKHvYe0RqyjmsUv4OJH14kNef+lD4Q=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0 h1:VRf4p0VhfuaR+Epy/nMIlu/9t39WU9CUgHVUvpuGxfU=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0/go.mod h1:5aDejksdXh5PdJN/OhpzATGT3kbNL0RMmw2Q0Q6E/o0=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0 h1:vRfn85jicO2F4eOTgsWtzmU/K3E/uZUtM1HEefvvJD8=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0/go.mod h1:sR+6eR+YEJhYZu9StbqzeWcCmHpfBAgX/qjP82HY9Gw=
+go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uFxcm2VO+vhad/eE=
+go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg=
+go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0 h1:PwQnErsLvEd1x6VIyjLmKQot9huKWqIfEz1kd+8aj4k=
+go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0/go.mod h1:tChJYsCG3wc6JPT9aJO3y+32V14NhmCFZOh3k5ORGdQ=
+go.opentelemetry.io/collector/processor v0.113.0 h1:BQI6MsKtiCG9HT/nmiRXTKP6SZFrjFKVfM6pTQfbc0k=
+go.opentelemetry.io/collector/processor v0.113.0/go.mod h1:oX91zMI8ZkoaYSUfUYflHiMiiBJPKtODNBUCrETLLd8=
+go.opentelemetry.io/collector/processor/batchprocessor v0.113.0 h1:LPNbVILg+cKTFIi8ziIa2idZ5MRlBIf4Wr72gZNgXb0=
+go.opentelemetry.io/collector/processor/batchprocessor v0.113.0/go.mod h1:tCg+B/1idJS5inxod+nRPXFdVi89Bsnl6RvzIOO9k5I=
+go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0 h1:3/5z0Pe/yduwF0DSpytW2+mwDA5JaIL/w6vfNYy5KzQ=
+go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0/go.mod h1:h3wIlqMtJGIDKttjMJBo6J4dHU/Mi6+bKSxvRVUpsXs=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE=
+go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA=
+go.opentelemetry.io/collector/processor/processortest v0.113.0/go.mod h1:tNg04r5KlpVx774jSC8U/oYst1eb7WIiz+0AjXKJ0Uw=
+go.opentelemetry.io/collector/receiver v0.113.0 h1:vraAbkPy8Pz9x5X39gV+j9t6x23PNsY2aJ6gQMugRbQ=
+go.opentelemetry.io/collector/receiver v0.113.0/go.mod h1:IUa8/lNw8Qh4L5Q3jOeRWKW0ebQPoNcfhytxN5Puq2A=
+go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0 h1:Kgan6/DCH1YZzOztXPPair+V2czPmrJxxrIIxLVYmn4=
+go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0/go.mod h1:1nVoRLC/antEw4gvcyaRBT3aBt7nh3KBASWLLhmm0Ts=
+go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0 h1:yhnj8kmh1IQ4g6fIWvhum/wYPUU2WmRpQuy1iSvf4e4=
+go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0/go.mod h1:3OB+oJlOb1rlLLdBwxae4g2Qh5C97Eg17HVveIddUCw=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 h1:uVxuzjGe2t1sbwahSBowVHYnGzpzn8brmfn8z1UHvQg=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0/go.mod h1:khKDkzYJR2x2OPUqGSmoSncdINT9lUE5IThiHPDbqZk=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0 h1:0vOvz3S4Q/KwcNCS9C7zPo0uxD6RSWktG88yGdxfV6g=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0/go.mod h1:sRq5ctm5UE/0Ar562wnCVQ1zbAie/D127D1WbtbEuEc=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/collector/service v0.113.0 h1:SFT+kWuj9TehC34MLEWfXH6QocGl3MYqLJ7UbxZtWzM=
+go.opentelemetry.io/collector/service v0.113.0/go.mod h1:6+JY80Yd4J4RWpvRmpCUUZFOZKGVs9a1QKCKPlDrKfs=
+go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 h1:j8icMXyyqNf6HGuwlYhniPnVsbJIq7n+WirDu3VAJdQ=
+go.opentelemetry.io/contrib/bridges/otelzap v0.6.0/go.mod h1:evIOZpl+kAlU5IsaYX2Siw+IbpacAZvXemVsgt70uvw=
go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c=
go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 h1:hCq2hNMwsegUvPzI7sPOvtO9cqyy5GbWt/Ybp2xrx8Q=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0/go.mod h1:LqaApwGx/oUmzsbqxkzuBvyoPpkxk3JQWnqfVrJ3wCA=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
-go.opentelemetry.io/contrib/propagators/b3 v1.30.0 h1:vumy4r1KMyaoQRltX7cJ37p3nluzALX9nugCjNNefuY=
-go.opentelemetry.io/contrib/propagators/b3 v1.30.0/go.mod h1:fRbvRsaeVZ82LIl3u0rIvusIel2UUf+JcaaIpy5taho=
-go.opentelemetry.io/contrib/zpages v0.55.0 h1:F+xj261Ulwl79QC+2O+IO1b3NbwppUDwN+7LbDSdQcY=
-go.opentelemetry.io/contrib/zpages v0.55.0/go.mod h1:dDqDGDfbXSjt/k9orZk4Huulvz1letX1YWTKts5GQpo=
-go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
-go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 h1:QSKmLBzbFULSyHzOdO9JsN9lpE4zkrz1byYGmJecdVE=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0/go.mod h1:sTQ/NH8Yrirf0sJ5rWqVu+oT82i4zL9FaF6rWcqnptM=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 h1:WypxHH02KX2poqqbaadmkMYalGyy/vil4HE4PM4nRJc=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0/go.mod h1:U79SV99vtvGSEBeeHnpgGJfTsnsdkWLpPN/CcHAzBSI=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 h1:VrMAbeJz4gnVDg2zEzjHG4dEH86j4jO6VYB+NgtGD8s=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0/go.mod h1:qqN/uFdpeitTvm+JDqqnjm517pmQRYxTORbETHq5tOc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8=
-go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ=
-go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 h1:bZHOb8k/CwwSt0DgvgaoOhBXWNdWqFWaIsGTtg1H3KE=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0/go.mod h1:XlV163j81kDdIt5b5BXCjdqVfqJFy/LJrHA697SorvQ=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 h1:IyFlqNsi8VT/nwYlLJfdM0y1gavxGpEvnf6FtVfZ6X4=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0/go.mod h1:bxiX8eUeKoAEQmbq/ecUT8UqZwCjZW52yJrXJUSozsk=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 h1:kn1BudCgwtE7PxLqcZkErpD8GKqLZ6BSzeW9QihQJeM=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0/go.mod h1:ljkUDtAMdleoi9tIG1R6dJUpVwDcYjw3J2Q6Q/SuiC0=
-go.opentelemetry.io/otel/log v0.6.0 h1:nH66tr+dmEgW5y+F9LanGJUBYPrRgP4g2EkmPE3LeK8=
-go.opentelemetry.io/otel/log v0.6.0/go.mod h1:KdySypjQHhP069JX0z/t26VHwa8vSwzgaKmXtIB3fJM=
-go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
-go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
-go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE=
-go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg=
-go.opentelemetry.io/otel/sdk/log v0.6.0 h1:4J8BwXY4EeDE9Mowg+CyhWVBhTSLXVXodiXxS/+PGqI=
-go.opentelemetry.io/otel/sdk/log v0.6.0/go.mod h1:L1DN8RMAduKkrwRAFDEX3E3TLOq46+XMGSbUfHU/+vE=
-go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM=
-go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y=
-go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
-go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
+go.opentelemetry.io/contrib/propagators/b3 v1.31.0 h1:PQPXYscmwbCp76QDvO4hMngF2j8Bx/OTV86laEl8uqo=
+go.opentelemetry.io/contrib/propagators/b3 v1.31.0/go.mod h1:jbqfV8wDdqSDrAYxVpXQnpM0XFMq2FtDesblJ7blOwQ=
+go.opentelemetry.io/contrib/zpages v0.56.0 h1:W7vP6s3juzL5KiHpr41zLNmsJ0QAZudYu8ay0zGAoko=
+go.opentelemetry.io/contrib/zpages v0.56.0/go.mod h1:IxPRP4TYHw9jLeaEOSDIiA9zmyJNZNO6sbW55iMvSXs=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 h1:mMOmtYie9Fx6TSVzw4W+NTpvoaS1JWWga37oI1a/4qQ=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0/go.mod h1:yy7nDsMMBUkD+jeekJ36ur5f3jJIrmCwUrY67VFhNpA=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 h1:FZ6ei8GFW7kyPYdxJaV2rgI6M+4tvZzhYsQ2wgyVC08=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0/go.mod h1:MdEu/mC6j3D+tTEfvI15b5Ci2Fn7NneJ71YMoiS3tpI=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 h1:ZsXq73BERAiNuuFXYqP4MR5hBrjXfMGSO+Cx7qoOZiM=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0/go.mod h1:hg1zaDMpyZJuUzjFxFsRYBoccE86tM9Uf4IqNMUxvrY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
+go.opentelemetry.io/otel/exporters/prometheus v0.53.0 h1:QXobPHrwiGLM4ufrY3EOmDPJpo2P90UuFau4CDPJA/I=
+go.opentelemetry.io/otel/exporters/prometheus v0.53.0/go.mod h1:WOAXGr3D00CfzmFxtTV1eR0GpoHuPEu+HJT8UWW2SIU=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 h1:TwmL3O3fRR80m8EshBrd8YydEZMcUCsZXzOUlnFohwM=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0/go.mod h1:tH98dDv5KPmPThswbXA0fr0Lwfs+OhK8HgaCo7PjRrk=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0 h1:HZgBIps9wH0RDrwjrmNa3DVbNRW60HEhdzqZFyAp3fI=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0/go.mod h1:RDRhvt6TDG0eIXmonAx5bd9IcwpqCkziwkOClzWKwAQ=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64=
+go.opentelemetry.io/otel/log v0.7.0 h1:d1abJc0b1QQZADKvfe9JqqrfmPYQCz2tUSO+0XZmuV4=
+go.opentelemetry.io/otel/log v0.7.0/go.mod h1:2jf2z7uVfnzDNknKTO9G+ahcOAyWcp1fJmk/wJjULRo=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/sdk/log v0.7.0 h1:dXkeI2S0MLc5g0/AwxTZv6EUEjctiH8aG14Am56NTmQ=
+go.opentelemetry.io/otel/sdk/log v0.7.0/go.mod h1:oIRXpW+WD6M8BuGj5rtS0aRu/86cbDV/dAfNaZBIjYM=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@@ -1450,10 +1466,10 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg=
+google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1484,8 +1500,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1525,15 +1541,15 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s=
k8s.io/api v0.27.1/go.mod h1:z5g/BpAiD+f6AArpqNjkY+cji8ueZDU/WV1jcj5Jk4E=
-k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
-k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
+k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
+k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
k8s.io/apimachinery v0.27.1/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM=
-k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
-k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
+k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs=
-k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
-k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
+k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
+k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
diff --git a/comp/otelcol/collector-contrib/impl/manifest.yaml b/comp/otelcol/collector-contrib/impl/manifest.yaml
index ee7f2058d4d4b..1ec7c7287cea2 100644
--- a/comp/otelcol/collector-contrib/impl/manifest.yaml
+++ b/comp/otelcol/collector-contrib/impl/manifest.yaml
@@ -1,72 +1,87 @@
+connectors:
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector
+ v0.113.0
dist:
+ description: Datadog OpenTelemetry Collector
module: github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib
name: otelcol-contrib
- description: Datadog OpenTelemetry Collector
- version: 0.111.0
output_path: ./comp/otelcol/collector-contrib/impl
- otelcol_version: 0.111.0
-
-extensions:
- - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.111.0
-
+ version: 0.113.0
exporters:
- - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.111.0
- - gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.111.0
- - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0
- - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.111.0
-
+- gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.113.0
+- gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.113.0
+- gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0
+- gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter
+ v0.113.0
+extensions:
+- gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver
+ v0.113.0
processors:
- - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.111.0
- - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0
-
+- gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.113.0
+- gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor
+ v0.113.0
providers:
- - gomod: go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0
- - gomod: go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0
- - gomod: go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0
- - gomod: go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.17.0
- - gomod: go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0
-
+- gomod: go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0
+- gomod: go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0
+- gomod: go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0
+- gomod: go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.17.0
+- gomod: go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0
receivers:
- - gomod: go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0
- - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.111.0
-
-connectors:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0
-
-# When adding a replace, add a comment before it to document why it's needed and when it can be removed
+- gomod: go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0
+- gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator
+ v0.113.0
+- gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver
+ v0.113.0
replaces:
- # See https://github.com/google/gnostic/issues/262
- - github.com/googleapis/gnostic v0.5.6 => github.com/googleapis/gnostic v0.5.5
- # See https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/12322#issuecomment-1185029670
- - github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 => github.com/docker/go-connections v0.4.0
- # see https://github.com/mattn/go-ieproxy/issues/45
- - github.com/mattn/go-ieproxy => github.com/mattn/go-ieproxy v0.0.1
- # see https://github.com/openshift/api/pull/1515
- - github.com/openshift/api => github.com/openshift/api v0.0.0-20230726162818-81f778f3b3ec
- - github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def => ../def
+- github.com/googleapis/gnostic v0.5.6 => github.com/googleapis/gnostic v0.5.5
+- github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 => github.com/docker/go-connections
+ v0.4.0
+- github.com/mattn/go-ieproxy => github.com/mattn/go-ieproxy v0.0.1
+- github.com/openshift/api => github.com/openshift/api v0.0.0-20230726162818-81f778f3b3ec
+- github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def => ../def
diff --git a/comp/otelcol/collector/impl-pipeline/pipeline.go b/comp/otelcol/collector/impl-pipeline/pipeline.go
index 6b0fd740a7587..a0f72266efb8a 100644
--- a/comp/otelcol/collector/impl-pipeline/pipeline.go
+++ b/comp/otelcol/collector/impl-pipeline/pipeline.go
@@ -18,7 +18,7 @@ import (
flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
"github.com/DataDog/datadog-agent/comp/core/status"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
compdef "github.com/DataDog/datadog-agent/comp/def"
"github.com/DataDog/datadog-agent/comp/metadata/inventoryagent"
collector "github.com/DataDog/datadog-agent/comp/otelcol/collector/def"
diff --git a/comp/otelcol/collector/impl/collector.go b/comp/otelcol/collector/impl/collector.go
index 57ecd6462a1b0..79ae0343ff241 100644
--- a/comp/otelcol/collector/impl/collector.go
+++ b/comp/otelcol/collector/impl/collector.go
@@ -29,7 +29,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core/config"
corelog "github.com/DataDog/datadog-agent/comp/core/log/def"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util"
compdef "github.com/DataDog/datadog-agent/comp/def"
collectorcontrib "github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def"
@@ -139,7 +139,7 @@ func addFactories(reqs Requires, factories otelcol.Factories) {
}
var buildInfo = component.BuildInfo{
- Version: "v0.111.0",
+ Version: "v0.113.0",
Command: filepath.Base(os.Args[0]),
Description: "Datadog Agent OpenTelemetry Collector",
}
diff --git a/comp/otelcol/converter/def/go.mod b/comp/otelcol/converter/def/go.mod
index 57c352f9295d9..2fd9ed2099778 100644
--- a/comp/otelcol/converter/def/go.mod
+++ b/comp/otelcol/converter/def/go.mod
@@ -12,7 +12,7 @@ require (
github.com/knadh/koanf/v2 v2.1.1 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
- go.opentelemetry.io/collector/featuregate v1.11.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.19.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/comp/otelcol/converter/def/go.sum b/comp/otelcol/converter/def/go.sum
index 0992f397b0a31..d458e07b68f14 100644
--- a/comp/otelcol/converter/def/go.sum
+++ b/comp/otelcol/converter/def/go.sum
@@ -26,8 +26,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA=
go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w=
-go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY=
-go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U=
+go.opentelemetry.io/collector/featuregate v1.19.0 h1:ASea2sU+tdpKI3RxIJC/pufDAfwAmrvcQ4EmTHVu0B0=
+go.opentelemetry.io/collector/featuregate v1.19.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
diff --git a/comp/otelcol/converter/fx/fx.go b/comp/otelcol/converter/fx/fx.go
index 750c005e24e91..f49a021841fee 100644
--- a/comp/otelcol/converter/fx/fx.go
+++ b/comp/otelcol/converter/fx/fx.go
@@ -15,7 +15,7 @@ import (
// Module defines the fx options for this component.
func Module() fxutil.Module {
return fxutil.Component(
- fxutil.ProvideComponentConstructor(converterimpl.NewConverter),
+ fxutil.ProvideComponentConstructor(converterimpl.NewConverterForAgent),
fxutil.ProvideOptional[converter.Component](),
)
}
diff --git a/comp/otelcol/converter/impl/autoconfigure.go b/comp/otelcol/converter/impl/autoconfigure.go
index 6158bb74031b0..af0eb118c008c 100644
--- a/comp/otelcol/converter/impl/autoconfigure.go
+++ b/comp/otelcol/converter/impl/autoconfigure.go
@@ -10,8 +10,9 @@ import (
"regexp"
"strings"
- "github.com/DataDog/datadog-agent/comp/core/config"
"go.opentelemetry.io/collector/confmap"
+
+ "github.com/DataDog/datadog-agent/comp/core/config"
)
var ddAutoconfiguredSuffix = "dd-autoconfigured"
@@ -115,6 +116,9 @@ func addComponentToPipeline(conf *confmap.Conf, comp component, pipelineName str
*conf = *confmap.NewFromStringMap(stringMapConf)
}
+// addCoreAgentConfig enhances the configuration with information about the core agent.
+// For example, if api key is not found in otel config, it can be retrieved from core
+// agent config instead.
func addCoreAgentConfig(conf *confmap.Conf, coreCfg config.Component) {
stringMapConf := conf.ToStringMap()
exporters, ok := stringMapConf["exporters"]
@@ -152,7 +156,8 @@ func addCoreAgentConfig(conf *confmap.Conf, coreCfg config.Component) {
}
}
}
-
+ // this is the only reference to Requires.Conf
+ // TODO: add logic to either fail or log message if api key not found
if coreCfg != nil {
apiMap["key"] = coreCfg.Get("api_key")
diff --git a/comp/otelcol/converter/impl/converter.go b/comp/otelcol/converter/impl/converter.go
index 4488c222763d3..b4a73691661c8 100644
--- a/comp/otelcol/converter/impl/converter.go
+++ b/comp/otelcol/converter/impl/converter.go
@@ -9,6 +9,8 @@ package converterimpl
import (
"context"
+ "go.uber.org/zap"
+
"go.opentelemetry.io/collector/confmap"
"github.com/DataDog/datadog-agent/comp/core/config"
@@ -17,6 +19,7 @@ import (
type ddConverter struct {
coreConfig config.Component
+ logger *zap.Logger
}
var (
@@ -28,13 +31,27 @@ var (
// An agent core configuration component dep is expected. A nil
// core config component will prevent enhancing the configuration
// with core agent config elements if any are missing from the provided
-// OTel configutation.
+// OTel configuration. For example, when building in an environment that
+// requires an argument-less constructor, such as with ocb. In this case,
+// the core config component is not available and the converter will not
+// attempt to enhance the configuration using agent data.
type Requires struct {
Conf config.Component
}
-// NewConverter currently only supports a single URI in the uris slice, and this URI needs to be a file path.
-func NewConverter(reqs Requires) (converter.Component, error) {
+// NewFactory returns a new converter factory.
+func NewFactory() confmap.ConverterFactory {
+ return confmap.NewConverterFactory(newConverter)
+}
+
+func newConverter(set confmap.ConverterSettings) confmap.Converter {
+ return &ddConverter{
+ logger: set.Logger,
+ }
+}
+
+// NewConverterForAgent currently only supports a single URI in the uris slice, and this URI needs to be a file path.
+func NewConverterForAgent(reqs Requires) (converter.Component, error) {
return &ddConverter{
coreConfig: reqs.Conf,
}, nil
diff --git a/comp/otelcol/converter/impl/converter_test.go b/comp/otelcol/converter/impl/converter_test.go
index a86cba83d6347..b742e066059fe 100644
--- a/comp/otelcol/converter/impl/converter_test.go
+++ b/comp/otelcol/converter/impl/converter_test.go
@@ -17,6 +17,7 @@ import (
"go.opentelemetry.io/collector/confmap/provider/httpprovider"
"go.opentelemetry.io/collector/confmap/provider/httpsprovider"
"go.opentelemetry.io/collector/confmap/provider/yamlprovider"
+ "go.uber.org/zap"
)
func uriFromFile(filename string) []string {
@@ -37,8 +38,8 @@ func newResolver(uris []string) (*confmap.Resolver, error) {
})
}
-func TestNewConverter(t *testing.T) {
- _, err := NewConverter(Requires{})
+func TestNewConverterForAgent(t *testing.T) {
+ _, err := NewConverterForAgent(Requires{})
assert.NoError(t, err)
}
@@ -147,7 +148,7 @@ func TestConvert(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
- converter, err := NewConverter(Requires{})
+ converter, err := NewConverterForAgent(Requires{})
assert.NoError(t, err)
resolver, err := newResolver(uriFromFile(tc.provided))
@@ -162,6 +163,27 @@ func TestConvert(t *testing.T) {
confResult, err := resolverResult.Resolve(context.Background())
assert.NoError(t, err)
+ assert.Equal(t, confResult.ToStringMap(), conf.ToStringMap())
+ })
+ }
+ // test using newConverter function to simulate ocb environment
+ nopLogger := zap.NewNop()
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ converter := newConverter(confmap.ConverterSettings{Logger: nopLogger})
+
+ resolver, err := newResolver(uriFromFile(tc.provided))
+ assert.NoError(t, err)
+ conf, err := resolver.Resolve(context.Background())
+ assert.NoError(t, err)
+
+ converter.Convert(context.Background(), conf)
+
+ resolverResult, err := newResolver(uriFromFile(tc.expectedResult))
+ assert.NoError(t, err)
+ confResult, err := resolverResult.Resolve(context.Background())
+ assert.NoError(t, err)
+
assert.Equal(t, confResult.ToStringMap(), conf.ToStringMap())
})
}
diff --git a/comp/otelcol/converter/impl/go.mod b/comp/otelcol/converter/impl/go.mod
index dde5215bdda1f..256db17fb9fee 100644
--- a/comp/otelcol/converter/impl/go.mod
+++ b/comp/otelcol/converter/impl/go.mod
@@ -42,12 +42,13 @@ require (
github.com/DataDog/datadog-agent/comp/core/config v0.56.2
github.com/DataDog/datadog-agent/comp/otelcol/converter/def v0.56.0-rc.3
github.com/stretchr/testify v1.9.0
- go.opentelemetry.io/collector/confmap v1.17.0
- go.opentelemetry.io/collector/confmap/provider/envprovider v0.111.0
- go.opentelemetry.io/collector/confmap/provider/fileprovider v0.111.0
- go.opentelemetry.io/collector/confmap/provider/httpprovider v0.111.0
- go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.111.0
- go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.111.0
+ go.opentelemetry.io/collector/confmap v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/envprovider v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/fileprovider v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/httpprovider v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.19.0
+ go.uber.org/zap v1.27.0
)
@@ -60,9 +61,9 @@ require (
github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 // indirect
github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect
github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 // indirect
- github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect
github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2 // indirect
- github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/executable v0.57.1 // indirect
github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 // indirect
github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.2 // indirect
@@ -80,7 +81,7 @@ require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
- github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -117,7 +118,6 @@ require (
go.uber.org/dig v1.18.0 // indirect
go.uber.org/fx v1.22.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
- go.uber.org/zap v1.27.0 // indirect
golang.org/x/exp v0.0.0-20241004190924-225e2abe05e6 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/text v0.19.0 // indirect
diff --git a/comp/otelcol/converter/impl/go.sum b/comp/otelcol/converter/impl/go.sum
index 26b872fdb6941..7227dbe9c24bd 100644
--- a/comp/otelcol/converter/impl/go.sum
+++ b/comp/otelcol/converter/impl/go.sum
@@ -59,8 +59,8 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
-github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -236,18 +236,18 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM=
-go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k=
-go.opentelemetry.io/collector/confmap/provider/envprovider v0.111.0 h1:Ip2KeNRdF6alndCXE6QrxUyq8LUTh+1IvaANEI95pSo=
-go.opentelemetry.io/collector/confmap/provider/envprovider v0.111.0/go.mod h1:jyFbV9hLrYJf2zNjqcpzkzB6zmPj/Ohr+S+vmPuxyMY=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v0.111.0 h1:4u+jXuvV20sBQOEzDlXlo7tT5dC+rHqjxW+JaPwl8W8=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v0.111.0/go.mod h1:SCJ8zvuuaOwQJk+zI87XSuc+HbquP2tsYb9aPlfeeRg=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v0.111.0 h1:CacNmiuJvTarb5B0u0z5GhqjPZ3gG7QRe8W4TVIF3ho=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v0.111.0/go.mod h1:3mtUk7wwDQyPUsHtCOLi2v0uSZWfC00BhOhqHs4CWs4=
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.111.0 h1:uNMlftoTRqjavhoGY2LvUc4z0+lDht1UHrvj856skRU=
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.111.0/go.mod h1:1Vhweh5dDeTUOmcw5WSGHPgHUwZzouf3y2dQr4yFWjA=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.111.0 h1:59HCXS17PbmBk0MeL6CqaIZDsWtY/DDVT9jHpd35aL0=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.111.0/go.mod h1:9/R8ucfVQEEEHMv9b7M6rSB8nF2k+MfIO93vbDEsaMU=
+go.opentelemetry.io/collector/confmap v1.19.0 h1:TQ0lZpAKqgsE0EKk+u4JA+uBbPYeFRmWP3GH43w40CY=
+go.opentelemetry.io/collector/confmap v1.19.0/go.mod h1:GgNu1ElPGmLn9govqIfjaopvdspw4PJ9KeDtWC4E2Q4=
+go.opentelemetry.io/collector/confmap/provider/envprovider v1.19.0 h1:f8O/I5pVRN86Gx5mHekNx92S6fGdOS4VcooRJKWe6Bs=
+go.opentelemetry.io/collector/confmap/provider/envprovider v1.19.0/go.mod h1:AiaW5YW1LD0/WlZuc8eZuZPBH6PA9QqsiAYRX1iC6T0=
+go.opentelemetry.io/collector/confmap/provider/fileprovider v1.19.0 h1:TYwyk4ea3U+5MYcEjrzZAaonBcLlabQu8CZeB7ekAYY=
+go.opentelemetry.io/collector/confmap/provider/fileprovider v1.19.0/go.mod h1:i3mL4OSGI5JM0hnzHujhJK+LDlvO3XrJxBsuclfU/jY=
+go.opentelemetry.io/collector/confmap/provider/httpprovider v1.19.0 h1:a077jcs3DVtaVdmgmCk3x4rRYuTkIqMDsoUc+VICHZk=
+go.opentelemetry.io/collector/confmap/provider/httpprovider v1.19.0/go.mod h1:HjYkzhHbwUacv27nq0JLsslGpbtrXyyfU30Oc72AWLU=
+go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.19.0 h1:8LoQxjlduFQUEwYuHWnxEj0A+GcAtpv2qPpDJVz7A5E=
+go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.19.0/go.mod h1:Y8ErEl5m9+1AWzWcMn52PATH5dw50wuyyPMffK62RCI=
+go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.19.0 h1:oV66DKiEdAt8EMZqGSChK2iEOxjrVaWRhf4OqqmqjbM=
+go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.19.0/go.mod h1:jtNUdO6i1k38BG7vFst+d1jk/N+c419uVR8HB4J0VjI=
go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
diff --git a/comp/otelcol/ddflareextension/def/go.mod b/comp/otelcol/ddflareextension/def/go.mod
index 5c928c96c4769..620a2cdcbf10c 100644
--- a/comp/otelcol/ddflareextension/def/go.mod
+++ b/comp/otelcol/ddflareextension/def/go.mod
@@ -6,12 +6,12 @@ require go.opentelemetry.io/collector/extension v0.111.0
require (
github.com/gogo/protobuf v1.3.2 // indirect
- go.opentelemetry.io/collector/component v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata v1.17.0 // indirect
- go.opentelemetry.io/otel v1.30.0 // indirect
- go.opentelemetry.io/otel/metric v1.30.0 // indirect
- go.opentelemetry.io/otel/trace v1.30.0 // indirect
+ go.opentelemetry.io/collector/component v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata v1.19.0 // indirect
+ go.opentelemetry.io/otel v1.31.0 // indirect
+ go.opentelemetry.io/otel/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/trace v1.31.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/net v0.30.0 // indirect
@@ -19,5 +19,5 @@ require (
golang.org/x/text v0.19.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
google.golang.org/grpc v1.67.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
)
diff --git a/comp/otelcol/ddflareextension/def/go.sum b/comp/otelcol/ddflareextension/def/go.sum
index a93a24f2ff713..061c05369625e 100644
--- a/comp/otelcol/ddflareextension/def/go.sum
+++ b/comp/otelcol/ddflareextension/def/go.sum
@@ -16,20 +16,20 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8=
go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
-go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
-go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
-go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
-go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
-go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -71,7 +71,7 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:
google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/comp/otelcol/ddflareextension/impl/configstore_test.go b/comp/otelcol/ddflareextension/impl/configstore_test.go
index 4d9c4ccb3de8f..b1b79d47df3ba 100644
--- a/comp/otelcol/ddflareextension/impl/configstore_test.go
+++ b/comp/otelcol/ddflareextension/impl/configstore_test.go
@@ -198,7 +198,7 @@ func newResolverSettings(uris []string, enhanced bool) confmap.ResolverSettings
func newConverterFactory(enhanced bool) []confmap.ConverterFactory {
converterFactories := []confmap.ConverterFactory{}
- converter, err := converterimpl.NewConverter(converterimpl.Requires{})
+ converter, err := converterimpl.NewConverterForAgent(converterimpl.Requires{})
if err != nil {
return []confmap.ConverterFactory{}
}
diff --git a/comp/otelcol/ddflareextension/impl/factory_test.go b/comp/otelcol/ddflareextension/impl/factory_test.go
index aea2fffda2995..1d07dfd566621 100644
--- a/comp/otelcol/ddflareextension/impl/factory_test.go
+++ b/comp/otelcol/ddflareextension/impl/factory_test.go
@@ -29,7 +29,7 @@ func TestNewFactoryForAgent(t *testing.T) {
cfg := factory.CreateDefaultConfig()
assert.NotNil(t, cfg)
- ext, err := factory.CreateExtension(context.Background(), extension.Settings{}, cfg)
+ ext, err := factory.Create(context.Background(), extension.Settings{}, cfg)
assert.NoError(t, err)
assert.NotNil(t, ext)
@@ -45,8 +45,7 @@ func TestTypeStability(t *testing.T) {
assert.Equalf(t, typ, metadata.Type,
"Factory type is %v expected it to be %x", typ, metadata.Type)
- stability := factory.ExtensionStability()
+ stability := factory.Stability()
assert.Equalf(t, stability, metadata.ExtensionStability,
"Factory stability is %v expected it to be %x", stability, metadata.ExtensionStability)
-
}
diff --git a/comp/otelcol/ddflareextension/impl/go.mod b/comp/otelcol/ddflareextension/impl/go.mod
index c123472c571df..2122375fde34b 100644
--- a/comp/otelcol/ddflareextension/impl/go.mod
+++ b/comp/otelcol/ddflareextension/impl/go.mod
@@ -13,6 +13,7 @@ replace (
github.com/DataDog/datadog-agent/comp/core/log/mock => ../../../core/log/mock
github.com/DataDog/datadog-agent/comp/core/secrets => ../../../core/secrets
github.com/DataDog/datadog-agent/comp/core/status => ../../../core/status
+ github.com/DataDog/datadog-agent/comp/core/tagger/tags => ../../../core/tagger/tags
github.com/DataDog/datadog-agent/comp/core/tagger/types => ../../../core/tagger/types
github.com/DataDog/datadog-agent/comp/core/tagger/utils => ../../../core/tagger/utils
github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../core/telemetry
@@ -96,10 +97,10 @@ replace (
github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system
github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../pkg/util/system/socket
github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../pkg/util/testutil
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ../../../../pkg/util/utilizationtracker
github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil
github.com/DataDog/datadog-agent/pkg/version => ../../../../pkg/version
github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20180202092358-40e2722dffea
- github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector => github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0
)
require (
@@ -108,48 +109,63 @@ require (
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter v0.59.0
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor v0.59.0
github.com/DataDog/datadog-agent/pkg/api v0.57.1
- github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0
- github.com/DataDog/datadog-agent/pkg/version v0.57.1
+ github.com/DataDog/datadog-agent/pkg/config/mock v0.58.1
+ github.com/DataDog/datadog-agent/pkg/version v0.58.1
github.com/google/go-cmp v0.6.0
github.com/gorilla/mux v1.8.1
- github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.113.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0
github.com/stretchr/testify v1.9.0
- go.opentelemetry.io/collector/component v0.111.0
- go.opentelemetry.io/collector/component/componentstatus v0.111.0
- go.opentelemetry.io/collector/config/confighttp v0.111.0
- go.opentelemetry.io/collector/confmap v1.17.0
- go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0
- go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0
- go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0
- go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.111.0
- go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0
- go.opentelemetry.io/collector/connector v0.111.0
- go.opentelemetry.io/collector/exporter v0.111.0
- go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0
- go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0
- go.opentelemetry.io/collector/extension v0.111.0
- go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0
- go.opentelemetry.io/collector/extension/zpagesextension v0.111.0
- go.opentelemetry.io/collector/otelcol v0.111.0
- go.opentelemetry.io/collector/processor v0.111.0
- go.opentelemetry.io/collector/processor/batchprocessor v0.111.0
- go.opentelemetry.io/collector/receiver v0.111.0
- go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0
- go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0
+ go.opentelemetry.io/collector/component v0.113.0
+ go.opentelemetry.io/collector/component/componentstatus v0.113.0
+ go.opentelemetry.io/collector/config/confighttp v0.113.0
+ go.opentelemetry.io/collector/confmap v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/envprovider v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/fileprovider v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/httpprovider v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.19.0
+ go.opentelemetry.io/collector/connector v0.113.0
+ go.opentelemetry.io/collector/exporter v0.113.0
+ go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0
+ go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0
+ go.opentelemetry.io/collector/extension v0.113.0
+ go.opentelemetry.io/collector/extension/extensioncapabilities v0.113.0
+ go.opentelemetry.io/collector/extension/zpagesextension v0.113.0
+ go.opentelemetry.io/collector/otelcol v0.113.0
+ go.opentelemetry.io/collector/processor v0.113.0
+ go.opentelemetry.io/collector/processor/batchprocessor v0.113.0
+ go.opentelemetry.io/collector/receiver v0.113.0
+ go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0
+ go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0
go.uber.org/zap v1.27.0
gopkg.in/yaml.v2 v2.4.0
)
+require (
+ github.com/pierrec/lz4/v4 v4.1.21 // indirect
+ go.opentelemetry.io/collector/connector/connectortest v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumererror v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exportertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/internal/fanoutconsumer v0.113.0 // indirect
+ go.opentelemetry.io/collector/internal/sharedcomponent v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor/processortest v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receivertest v0.113.0 // indirect
+ go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 // indirect
+)
+
require (
cloud.google.com/go/auth v0.7.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
- cloud.google.com/go/compute/metadata v0.5.0 // indirect
+ cloud.google.com/go/compute/metadata v0.5.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
@@ -158,22 +174,23 @@ require (
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Code-Hex/go-generics-cache v1.5.1 // indirect
- github.com/DataDog/agent-payload/v5 v5.0.123 // indirect
- github.com/DataDog/datadog-agent/comp/core/config v0.57.1 // indirect
- github.com/DataDog/datadog-agent/comp/core/flare/builder v0.57.1 // indirect
- github.com/DataDog/datadog-agent/comp/core/flare/types v0.57.1 // indirect
- github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/comp/core/log/def v0.58.0-devel // indirect
+ github.com/DataDog/agent-payload/v5 v5.0.134 // indirect
+ github.com/DataDog/datadog-agent/comp/core/config v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/comp/core/flare/builder v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/comp/core/flare/types v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/comp/core/log/def v0.58.1 // indirect
github.com/DataDog/datadog-agent/comp/core/log/mock v0.58.0-devel // indirect
- github.com/DataDog/datadog-agent/comp/core/secrets v0.57.1 // indirect
+ github.com/DataDog/datadog-agent/comp/core/secrets v0.58.1 // indirect
github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect
+ github.com/DataDog/datadog-agent/comp/core/tagger/tags v0.0.0-00010101000000-000000000000 // indirect
github.com/DataDog/datadog-agent/comp/core/tagger/types v0.59.0 // indirect
github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.59.0 // indirect
- github.com/DataDog/datadog-agent/comp/core/telemetry v0.57.1 // indirect
- github.com/DataDog/datadog-agent/comp/def v0.57.1 // indirect
+ github.com/DataDog/datadog-agent/comp/core/telemetry v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/comp/def v0.58.1 // indirect
github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect
+ github.com/DataDog/datadog-agent/comp/logs/agent/config v0.58.1 // indirect
github.com/DataDog/datadog-agent/comp/otelcol/converter/def v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.59.0-rc.6 // indirect
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.59.0-rc.6 // indirect
@@ -192,53 +209,54 @@ require (
github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0-rc.6 // indirect
github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0-rc.6 // indirect
github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect
- github.com/DataDog/datadog-agent/pkg/config/utils v0.57.1 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/processor v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/sds v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/sender v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/utils v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/auditor v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/client v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/message v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/metrics v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/processor v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/sds v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/sender v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/sources v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.58.1 // indirect
github.com/DataDog/datadog-agent/pkg/metrics v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/obfuscate v0.56.0-rc.3 // indirect
+ github.com/DataDog/datadog-agent/pkg/obfuscate v0.59.0-devel.0.20240911192058-0c2181220f85 // indirect
github.com/DataDog/datadog-agent/pkg/orchestrator/model v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/process/util/api v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.56.0-rc.3 // indirect
+ github.com/DataDog/datadog-agent/pkg/proto v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.58.1 // indirect
github.com/DataDog/datadog-agent/pkg/serializer v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 // indirect
+ github.com/DataDog/datadog-agent/pkg/status/health v0.58.1 // indirect
github.com/DataDog/datadog-agent/pkg/tagger/types v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect
+ github.com/DataDog/datadog-agent/pkg/telemetry v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/trace v0.59.0-devel.0.20240911192058-0c2181220f85 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/backoff v0.58.1 // indirect
github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/cgroups v0.58.1 // indirect
github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/executable v0.57.1 // indirect
- github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.1 // indirect
- github.com/DataDog/datadog-agent/pkg/util/fxutil v0.57.1 // indirect
- github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.1 // indirect
- github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/executable v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/filesystem v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/fxutil v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/http v0.58.1 // indirect
github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/log v0.57.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/log v0.58.1 // indirect
github.com/DataDog/datadog-agent/pkg/util/log/setup v0.58.0-devel // indirect
- github.com/DataDog/datadog-agent/pkg/util/optional v0.57.1 // indirect
- github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.1 // indirect
- github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/optional v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/pointer v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/scrubber v0.58.1 // indirect
github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/system v0.57.1 // indirect
- github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.1 // indirect
- github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.1 // indirect
- github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/startstop v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/statstracker v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/system v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/system/socket v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/winutil v0.58.1 // indirect
+ github.com/DataDog/datadog-api-client-go/v2 v2.31.0 // indirect
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
github.com/DataDog/go-sqllexer v0.0.16 // indirect
@@ -252,12 +270,11 @@ require (
github.com/DataDog/viper v1.13.5 // indirect
github.com/DataDog/zstd v1.5.5 // indirect
github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f // indirect
- github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/alecthomas/participle/v2 v2.1.1 // indirect
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect
- github.com/antchfx/xmlquery v1.4.1 // indirect
- github.com/antchfx/xpath v1.3.1 // indirect
+ github.com/antchfx/xmlquery v1.4.2 // indirect
+ github.com/antchfx/xpath v1.3.2 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/aws/aws-sdk-go v1.55.5 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
@@ -272,12 +289,12 @@ require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dennwc/varint v1.0.0 // indirect
github.com/digitalocean/godo v1.118.0 // indirect
- github.com/distribution/reference v0.5.0 // indirect
- github.com/docker/docker v27.0.3+incompatible // indirect
+ github.com/distribution/reference v0.6.0 // indirect
+ github.com/docker/docker v27.3.1+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
- github.com/ebitengine/purego v0.8.0 // indirect
+ github.com/ebitengine/purego v0.8.1 // indirect
github.com/elastic/go-grok v0.3.1 // indirect
github.com/elastic/lunes v0.1.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
@@ -285,7 +302,7 @@ require (
github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
github.com/fatih/color v1.17.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
@@ -296,7 +313,7 @@ require (
github.com/go-openapi/jsonreference v0.20.4 // indirect
github.com/go-openapi/swag v0.22.9 // indirect
github.com/go-resty/resty/v2 v2.13.1 // indirect
- github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/go-zookeeper/zk v1.0.3 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/goccy/go-json v0.10.3 // indirect
@@ -318,7 +335,7 @@ require (
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
- github.com/hashicorp/consul/api v1.29.4 // indirect
+ github.com/hashicorp/consul/api v1.30.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -346,7 +363,7 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
github.com/karrick/godirwalk v1.17.0 // indirect
- github.com/klauspost/compress v1.17.10 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
github.com/knadh/koanf/maps v0.1.1 // indirect
github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
github.com/knadh/koanf/v2 v2.1.1 // indirect
@@ -372,18 +389,15 @@ require (
github.com/mostynb/go-grpc-compression v1.2.3 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.113.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
@@ -397,9 +411,9 @@ require (
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
- github.com/prometheus/client_golang v1.20.4 // indirect
+ github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.60.0 // indirect
+ github.com/prometheus/common v0.60.1 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/prometheus/prometheus v0.54.1 // indirect
@@ -408,7 +422,7 @@ require (
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
github.com/shirou/gopsutil/v3 v3.24.5 // indirect
- github.com/shirou/gopsutil/v4 v4.24.9 // indirect
+ github.com/shirou/gopsutil/v4 v4.24.10 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
@@ -426,58 +440,53 @@ require (
github.com/x448/float16 v0.8.4 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/collector v0.111.0 // indirect
- go.opentelemetry.io/collector/client v1.17.0 // indirect
- go.opentelemetry.io/collector/component/componentprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configauth v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configcompression v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configgrpc v0.111.0 // indirect
- go.opentelemetry.io/collector/config/confignet v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configopaque v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configretry v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configtls v1.17.0 // indirect
- go.opentelemetry.io/collector/config/internal v0.111.0 // indirect
- go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect
- go.opentelemetry.io/collector/exporter/debugexporter v0.111.0 // indirect
- go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/auth v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect
- go.opentelemetry.io/collector/featuregate v1.17.0 // indirect
- go.opentelemetry.io/collector/internal/globalgates v0.111.0 // indirect
- go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect
- go.opentelemetry.io/collector/otelcol/otelcoltest v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata v1.17.0 // indirect
- go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.111.0 // indirect
- go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/semconv v0.111.0 // indirect
- go.opentelemetry.io/collector/service v0.111.0 // indirect
+ go.opentelemetry.io/collector v0.113.0 // indirect
+ go.opentelemetry.io/collector/client v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configauth v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configcompression v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configgrpc v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/confignet v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configopaque v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configretry v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configtls v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/internal v0.113.0 // indirect
+ go.opentelemetry.io/collector/connector/connectorprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/auth v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.19.0 // indirect
+ go.opentelemetry.io/collector/pdata v1.19.0 // indirect
+ go.opentelemetry.io/collector/pdata/pprofile v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/testdata v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.113.0 // indirect
+ go.opentelemetry.io/collector/service v0.113.0 // indirect
go.opentelemetry.io/contrib/config v0.10.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect
- go.opentelemetry.io/contrib/propagators/b3 v1.30.0 // indirect
- go.opentelemetry.io/contrib/zpages v0.55.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect
+ go.opentelemetry.io/contrib/propagators/b3 v1.31.0 // indirect
+ go.opentelemetry.io/contrib/zpages v0.56.0 // indirect
go.opentelemetry.io/otel v1.31.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/prometheus v0.52.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 // indirect
- go.opentelemetry.io/otel/log v0.6.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/prometheus v0.53.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect
+ go.opentelemetry.io/otel/log v0.7.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.31.0 // indirect
- go.opentelemetry.io/otel/sdk/log v0.6.0 // indirect
+ go.opentelemetry.io/otel/sdk/log v0.7.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
go.opentelemetry.io/otel/trace v1.31.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
@@ -498,17 +507,17 @@ require (
golang.org/x/tools v0.26.0 // indirect
gonum.org/v1/gonum v0.15.1 // indirect
google.golang.org/api v0.188.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect
google.golang.org/grpc v1.67.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gotest.tools/v3 v3.2.0 // indirect
- k8s.io/api v0.31.1 // indirect
- k8s.io/apimachinery v0.31.1 // indirect
- k8s.io/client-go v0.31.1 // indirect
+ k8s.io/api v0.31.2 // indirect
+ k8s.io/apimachinery v0.31.2 // indirect
+ k8s.io/client-go v0.31.2 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
diff --git a/comp/otelcol/ddflareextension/impl/go.sum b/comp/otelcol/ddflareextension/impl/go.sum
index 32ea7933682dc..7f3a943f94e67 100644
--- a/comp/otelcol/ddflareextension/impl/go.sum
+++ b/comp/otelcol/ddflareextension/impl/go.sum
@@ -23,8 +23,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
-cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
+cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -59,12 +59,12 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
-github.com/DataDog/agent-payload/v5 v5.0.123 h1:fc/mME+zXBPo8i8690rVJXeqlZ1o+8ixIzNu43XP+o8=
-github.com/DataDog/agent-payload/v5 v5.0.123/go.mod h1:FgVQKmVdqdmZTbxIptqJC/l+xEzdiXsaAOs/vGAvWzs=
-github.com/DataDog/datadog-agent/comp/core/log v0.54.0 h1:wP3bJua8qmURqLXkmYxrbELMJQ2oO1MuVNfxHJT4wiQ=
-github.com/DataDog/datadog-agent/comp/core/log v0.54.0/go.mod h1:mtMxZiwg13b4bHgDf8xE6FHgTcadzI5Cc0lx2MSY1mE=
-github.com/DataDog/datadog-api-client-go/v2 v2.26.0 h1:bZr0hu+hx8L91+yU5EGw8wK3FlCVEIashpx+cylWsf0=
-github.com/DataDog/datadog-api-client-go/v2 v2.26.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc=
+github.com/DataDog/agent-payload/v5 v5.0.134 h1:h0oP3vDTOsjW1uKIZxKsCjOV/40jkY2Y+42GKAVH9ig=
+github.com/DataDog/agent-payload/v5 v5.0.134/go.mod h1:FgVQKmVdqdmZTbxIptqJC/l+xEzdiXsaAOs/vGAvWzs=
+github.com/DataDog/datadog-agent/comp/core/log v0.56.2 h1:qvBT+FfjKGqimyEvmsNHCZKbTfBJAdUZSVy2IZQ8HS4=
+github.com/DataDog/datadog-agent/comp/core/log v0.56.2/go.mod h1:ivJ/RMZjTNkoPPNDX+v/nnBwABLCiMv1vQA5tk/HCR4=
+github.com/DataDog/datadog-api-client-go/v2 v2.31.0 h1:JfJhYlHfLzvauI8u6h23smTooWYe6quNhhg9gpTszWY=
+github.com/DataDog/datadog-api-client-go/v2 v2.31.0/go.mod h1:d3tOEgUd2kfsr9uuHQdY+nXrWp4uikgTgVCPdKNK30U=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU=
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
@@ -98,8 +98,8 @@ github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A=
github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f/go.mod h1:oXfOhM/Kr8OvqS6tVqJwxPBornV0yrx3bc+l0BDr7PQ=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0 h1:N4xzkSD2BkRwEZSPf3C2eUZxjS5trpo4gOwRh8mu+BA=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0/go.mod h1:p2puVVSKjQ84Qb1gzw2XHLs34WQyHTYFZLaVxypAFYs=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
@@ -117,10 +117,10 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
-github.com/antchfx/xmlquery v1.4.1 h1:YgpSwbeWvLp557YFTi8E3z6t6/hYjmFEtiEKbDfEbl0=
-github.com/antchfx/xmlquery v1.4.1/go.mod h1:lKezcT8ELGt8kW5L+ckFMTbgdR61/odpPgDv8Gvi1fI=
-github.com/antchfx/xpath v1.3.1 h1:PNbFuUqHwWl0xRjvUPjJ95Agbmdj2uzzIwmQKgu4oCk=
-github.com/antchfx/xpath v1.3.1/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
+github.com/antchfx/xmlquery v1.4.2 h1:MZKd9+wblwxfQ1zd1AdrTsqVaMjMCwow3IqkCSe00KA=
+github.com/antchfx/xmlquery v1.4.2/go.mod h1:QXhvf5ldTuGqhd1SHNvvtlhhdQLks4dD0awIVhXIDTA=
+github.com/antchfx/xpath v1.3.2 h1:LNjzlsSjinu3bQpw9hWMY9ocB80oLOWuQqFvO6xt51U=
+github.com/antchfx/xpath v1.3.2/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
@@ -191,12 +191,12 @@ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUn
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4=
github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
-github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
-github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
-github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
+github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -204,8 +204,8 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/ebitengine/purego v0.8.0 h1:JbqvnEzRvPpxhCJzJJ2y0RbiZ8nyjccVUrSM3q+GvvE=
-github.com/ebitengine/purego v0.8.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
+github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE=
+github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/elastic/go-grok v0.3.1 h1:WEhUxe2KrwycMnlvMimJXvzRa7DoByJB4PVUIE1ZD/U=
github.com/elastic/go-grok v0.3.1/go.mod h1:n38ls8ZgOboZRgKcjMY8eFeZFMmcL9n2lP0iHhIDk64=
github.com/elastic/lunes v0.1.0 h1:amRtLPjwkWtzDF/RKzcEPMvSsSseLDLW+bnhfNSLRe4=
@@ -230,8 +230,8 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -267,8 +267,8 @@ github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
-github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@@ -386,10 +386,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
-github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
-github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
-github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
-github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
+github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ=
+github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
@@ -492,8 +490,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
-github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs=
github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
@@ -590,56 +588,58 @@ github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
-github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0 h1:Kpfqjwp+nlgqacXkSS8T8iGiTMTFo8NoT8AoRomDOpU=
-github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0/go.mod h1:ymbGC/jEXTq8mgHsxzV1PjVGHmV5hSQXmkYkFfGfuLw=
-github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0 h1:9rSlNU6xUEcgneB7Pm502VMH63Abc8Ibpd9y0fBit3Q=
-github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0/go.mod h1:J87FjckPF9jl1MLA36Yemp6JfsCMNk0QDUBb+7rLw7E=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.103.0 h1:2XWbSIoIKQyFvn97pS4uc0Pxwe7EWCmZEg2r/+kiL58=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.103.0/go.mod h1:WrnJQRKaivYllAC2B1KeCI5uYiYsZv3Hcbd6iQfr9Jg=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.111.0 h1:p8vV11sj1cJFbd3B9tuGiA9gMGTvaSR4A57qQvVs9iY=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.111.0/go.mod h1:sBcqg1DlNC6V8e455vASnIrr8LJX7stQ8V3wlbCsIhM=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0 h1:59r33b8JeJUCQElz57S7AbwqUfQ1f1FVVjgyjiZp7Ec=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0/go.mod h1:4/axxWsIgRRptIg4olabh6ZXNL0Xt0Qjpaij8mD+dt8=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0 h1:m/u2iRttl/nEjp0EZ9w371LLAqogw0tDn+wPU6D7ArY=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0/go.mod h1:mmkCl306sRZYt/7uNmjvuRRvbe/xUDSDm8fEAGSSMKI=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.111.0 h1:5tERPDm3N3lTHWwSAK1KsGLc8/oi6HtjvLvrP21oZMM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.111.0/go.mod h1:J1NJse8mJrVz3HeMoSRH3qAU5/y0cBRVf1Acl/lWVz8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0 h1:QhEwQTGTXitMPbmyloNfLVz1r9YzZ8izJUJivI8obzs=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0/go.mod h1:I7nEkR7TDPFw162jYtPJZVevkniQfQ0FLIFuu2RGK3A=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 h1:Hh3Lt6GIw/jMfCSJ5XjBoZRmjZ1pbJJu6Xi7WrDTUi0=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0/go.mod h1:rQ9lQhijXIJIT5UGuwiKoEcWW6bdWJ4fnO+PndfuYEw=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.111.0 h1:AFzcAfNereWXW8SP5rPtslxv8kNo3LCnnCjUzl7ZCVM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.111.0/go.mod h1:fEtKy/bUTeRKDblbFM9IyIA/QjhepmPs36TtjO1N7mo=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.111.0 h1:jKLX/Ojg8bcHtUaoS585CMbvNJdEFHZjdx233SRdf3s=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.111.0/go.mod h1:Sw1fPP1MkfGFoq1gnbLap5hdH1aoRUCVF5nrLymHa90=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.111.0 h1:kKfYR5GCVeLfkjZWMYZtnvv7NqKY9M1NaZUKVXye+2A=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.111.0/go.mod h1:tgQHCvogGlsnxQgtyhy+OwvBF4FDmK8dPlxs6nahdWs=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.111.0 h1:g9U+7hjEm1yUgaO1rJxstfLW7aEeo3S1tUyyvMlf7A8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.111.0/go.mod h1:tL9m9RF+SGLi80ai1SAy1S/o60kedifzjy0gtGQsnmY=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.111.0 h1:0MJmp4O7KUQOUmQYJEGNgtf30Nhx/3nLMn0jnU4Klhw=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.111.0/go.mod h1:4PYgwpscyZUUdQVLsd7dh+LXtm1QbWCvU47P3G/7tLg=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.111.0 h1:W0SthymNSB2fzGuY2KUib6EVyj/uGO3hJvaM6nW0evE=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.111.0/go.mod h1:GQHN6IbBsaGmMJIOQcqA7RXiJi55rXldP3di5YJ1IYA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.111.0 h1:Ld/1EUAQ6z3CirSyf4A8waHzUAZbMPrDOno+7tb0vKM=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.111.0/go.mod h1:wAOT1iGOOTPTw2ysr0DW2Wrfi0/TECVgiGByRQfFiV4=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0 h1:kUUO8VNv/d9Tpx0NvOsRnUsz/JvZ8SWRnK+vT0cNjuU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0/go.mod h1:SstR8PglIFBVGCZHS69bwJGl6TaCQQ5aLSEoas/8SRA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.111.0 h1:RSbk3ty1D9zeBC/elcqVdJoZjpAa331Wha99yNHnH6w=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.111.0/go.mod h1:iDBwbN0by4Y75X6j5PuRoJL5MpoaDv0l7s8dHFQHJPU=
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.113.0 h1:WJfe78FxmmshTWilSpwtDRHoOl8gxKAnTW0eT4kureY=
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.113.0/go.mod h1:XkFc7X0M2hnM4AYg6yX+r7btu208RG8THfM/npF/eKQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.113.0 h1:f3PXc+bgwGtJIlWwtOXDYYNBKOyabhkLl6Q3j/Rc1ow=
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.113.0/go.mod h1:Ix1L1Godr9TS2DvHWSz/+0RBPa/D34azSvk/xU3dlGs=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.113.0 h1:ITFlE4UHWUQQg5Vy7XfaRaE7hADsK3UTtEJ5xrPbWU8=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.113.0/go.mod h1:tH0inPPuK/JYMDlLTe7ioGN1Zbp3NbNSp8H0Vc5C+uU=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.113.0 h1:sYEmpMeBGMSoy8j1GSEkBO5GM578Qtq3QtmVcsYn51s=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.113.0/go.mod h1:ULlXfiJdXw0ZzUGXLToQvGb1gQuIGC2BjtfyTnw3QtE=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0 h1:Aej9sL1v25Xf8AfM1fyRluBLV5g5+40GnagCb0/UJfY=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0/go.mod h1:QiXedkQif06vbjtVgnmmrHOunLUoLLAf10uaA/qKgts=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0 h1:hc407moydGsK9FfAxjP3Tw+akhmKO8PfaH18II3N7Q4=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0/go.mod h1:+1IJOoUqBzghufMZDSMhKzs1UOi39h8pMFDxWm/k1k4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.113.0 h1:qudJNiKFfxly/lPyfdZNwnT6OKCzRFw0BI0E5CI6WwU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.113.0/go.mod h1:eHVWQ484ohG4ZjaV8KTej3CMVEPh0w6zBXfi+qqvyGw=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.113.0 h1:7A8MgFPYRQWq1RkFBktq01CW+eTYhiGML0IxQNv2uaM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.113.0/go.mod h1:E1pc7mDXH+5s7RyXw291h8lz2dhzPzaDrAHqP1Lawvw=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.113.0 h1:EZ/ZNsovNcQq+wwAbTAWNY+6BHnv24NxvVoC6eYmtg8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.113.0/go.mod h1:u21dEQ9yQ0JyLMSrKLWWzHG/lHSlteNfa/EQ7Vqcle4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.113.0 h1:SjzsWZqrxUoRWvXzwrbjQCnoJRJApJVCCqjrtflapMM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.113.0/go.mod h1:sYIh0S63ztcL2q9gEKhvviDQ5caH1sFE1oeFRDQOQ6A=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.113.0 h1:E/D5TwJyKN19p1FQ0XD5r5G1uH9NH/HVAM0e1hFMwFU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.113.0/go.mod h1:FcClDm9XVV5tzUDzmH2Mhe6TfYiZ/3GSAQITnuCjZgg=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.113.0 h1:BidrOROxYyacsUzNJyPZdvuX9VpbmFnSJXAt0yz6cXU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.113.0/go.mod h1:TM5DUkjqHozcpjCX36f7cDv6Rv+J8ysZ52zCYAEQZCk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.113.0 h1:V9CRl77lPG2xFPpnRf1QLiePo7FZngt+vw6M2KLdRMU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.113.0/go.mod h1:zL+Msnlb1TEObHQ2RNnPKbVr3GhSdyI2ZqGtiSxg2/E=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.113.0 h1:sfPQ3RPyimzEzB2aQtUaEu7ElwDmlze+q0moWV9YpkI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.113.0/go.mod h1:QMenHMTJ5qrpghfPoMMpS0QwW6pQrXOqHn7QcNgn+NU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.113.0 h1:5YU2trp7n56EyDS9dEyY1UxyaW6wxB4KiyKoyjDYooo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.113.0/go.mod h1:EBtBK1lE/HMUz51cafBLlJAXZ/2ZDRCV4C+rT04fMYM=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.113.0 h1:e2WOkaj5AWPaKTU4l+GEXGrEUbrAhQPQ7zLUdnXLGX8=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.113.0/go.mod h1:x+DR/o7xddbdhpQP2QKBJkPUdrj2tl/uR1OJ/sqlrWc=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.113.0 h1:GERkhEtH3Uk8CMBzFoBmMD7fBfcrtIM9hopbQqzdvNs=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.113.0/go.mod h1:+VbefhdCgKiTXsIU6sQj9L96Ow53a8EMcUW6EMt3zTA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.113.0 h1:Ux4k3jMTufk4HZ4RNYrqLxIt6wKEeOFAndzGWBjiUqs=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.113.0/go.mod h1:GWsSVmzpKZucOefuqqvKEUbnqGRV9OCSX2vzTjC/sbI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.113.0 h1:n44G0Quw+OQMZ+ELOo/Aw1qcwVu7LXae8GBVjVSE+HQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.113.0/go.mod h1:6dxGDpWsWnzC5UK3RhgfdXRC+3c8RLCO+cC+RiS+jIU=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.111.0 h1:7DqvnAOXIPv6PEKA347VXACc07E1utEWcjuxsY4YOXA=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.111.0/go.mod h1:6hlplIB2LiSciMabYB5IpwrBn3Hl/P8JakNm0uNkVug=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.111.0 h1:BCev4nJfHH2u9AsWFfxR1o1Vt5HoW9myN4jaktZInRA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.111.0/go.mod h1:xJ8w6JN/tfRpUXTU6jx/bYmTIcy7OTz7PVFVR/SdqC8=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.111.0 h1:EXgr2fMBJJFnmw3GVRD2fhX3Dqq11g1IoUsrhWfcDn4=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.111.0/go.mod h1:uDdKEduyex67rYq75wyUJC1Wl0QhrqBE09WDa1SznMA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.113.0 h1:vKtNSM3VQBTJx1ecf+I1iqn4kj7fKif1SpBLQ+numf8=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.113.0/go.mod h1:Iw3ndTvDCbmN6uugOfIqmfb1pgCWTYg+tXXS2rr3QJo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.113.0 h1:XzI7y1bC29bnJ9VgyA0JCws0e/rIyt7yteT5gGLe6nw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.113.0/go.mod h1:OxdhzDFd7/cRck6LeWLF2TUC/QhwoJSUlw35NuVbvzA=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.111.0 h1:UUo4VOhBMIm1ZTs9xmZO4IFazLHsjUZnPkS0+q7qNL4=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.111.0/go.mod h1:5Ntpnh1KsrfOnPVUWCk/lte4Gma12bJPU8EhBS3AKSE=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0 h1:DF+kp5Gtg5B95VncWJb1oOIvf9PGpZ/gxWAHLdIrTEk=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0/go.mod h1:UzFds6+yARb/SHnC93hMeGWKJIDA131nm2dxZW+kTsc=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0 h1:KkHeODEukk2RveIEHvV5dPe06oA2PKAKbpjVZPtCRsQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0/go.mod h1:Ijvd5VMB2tstz3+3BiQy5azewQ31N4fytMFNdo8dLWE=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0 h1:abeLe2WANVWpnNua41Aa+RTmYYGs0gk1oQRd2/XH7Uo=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0/go.mod h1:Nij85WmJr/+q0HeAvGulEYxFE+PMlhFelPWN6yzCuuw=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.113.0 h1:vgYhhliqQ6WUy5b1BE0ILJQKTweaLDPI5l/bUIunqLo=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.113.0/go.mod h1:UG/8zqyqbdN0HHkiWC7GZW4wFL4GIyRtsshc1RY8bGo=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.113.0 h1:c4vPI/PrRPnueoaKJKzBztsASIn5yZ7BT7uc4PHR39E=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.113.0/go.mod h1:MR9VrhTtPazVAEWR/RPQln1i0Cmrc/6e2+zRpI/gwhM=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0 h1:frNZmJqTjdGO4vkxM2LN5URbddpD+R8taOCtDz3JJiQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0/go.mod h1:qFJOAhv+agSMdJSmsIW4QDsTWIdWo8NRZvY3OV2iWV8=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -665,6 +665,8 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -687,8 +689,8 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
-github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -702,8 +704,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
-github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -724,8 +726,8 @@ github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -738,8 +740,8 @@ github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbm
github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
-github.com/shirou/gopsutil/v4 v4.24.9 h1:KIV+/HaHD5ka5f570RZq+2SaeFsb/pq+fp2DGNWYoOI=
-github.com/shirou/gopsutil/v4 v4.24.9/go.mod h1:3fkaHNeYsUFCGZ8+9vZVWtbyM1k2eRnlL+bWO8Bxa/Q=
+github.com/shirou/gopsutil/v4 v4.24.10 h1:7VOzPtfw/5YDU+jLEoBwXwxJbQetULywoSV4RYY7HkM=
+github.com/shirou/gopsutil/v4 v4.24.10/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY=
@@ -842,154 +844,170 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector v0.111.0 h1:D3LJTYrrK2ac94E2PXPSbVkArqxbklbCLsE4MAJQdRo=
-go.opentelemetry.io/collector v0.111.0/go.mod h1:eZi4Z1DmHy+sVqbUI8dZNvhrH7HZIlX+0AKorOtv6nE=
-go.opentelemetry.io/collector/client v1.17.0 h1:eJB4r4nPY0WrQ6IQEEbOPCOfQU7N15yzZud9y5fKfms=
-go.opentelemetry.io/collector/client v1.17.0/go.mod h1:egG3tOG68zvC04hgl6cW2H/oWCUCCdDWtL4WpbcSUys=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/component/componentprofiles v0.111.0 h1:yT3Sa833G9GMiXkAOuYi30afd/5vTmDQpZo6+X/XjXM=
-go.opentelemetry.io/collector/component/componentprofiles v0.111.0/go.mod h1:v9cm6ndumcbCSqZDBs0vRReRW7KSYax1RZVhs/CiZCo=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY=
-go.opentelemetry.io/collector/config/configauth v0.111.0 h1:0CcgX4TzK5iu2YtryIu3al8lNI+9fqjbGoyvAFk9ZCw=
-go.opentelemetry.io/collector/config/configauth v0.111.0/go.mod h1:5oyYNL3gnYMYNdNsEjFvA2Tdc1yjG8L+HQFIjPo6kK8=
-go.opentelemetry.io/collector/config/configcompression v1.17.0 h1:5CzLHTPOgHaKod1ZQLYs0o7GZDBhdsLQRm8Lcbo79vU=
-go.opentelemetry.io/collector/config/configcompression v1.17.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
-go.opentelemetry.io/collector/config/configgrpc v0.111.0 h1:XwHBWCP0m/d6YZ0VZltzVvnz5hDB9ik7sPRjJIdmjUk=
-go.opentelemetry.io/collector/config/configgrpc v0.111.0/go.mod h1:K9OLwZM8dGNL1Jul/FGxlRsnLd1umgDyA+yxq2BNXUs=
-go.opentelemetry.io/collector/config/confighttp v0.111.0 h1:nZJFHKYYeCasyhhFC71iZf6GAs6pfFcNOga6b8+lFvc=
-go.opentelemetry.io/collector/config/confighttp v0.111.0/go.mod h1:heE5JjcLDiH8fMULf55QL2oI9+8Ct58Vq/QfP7TV684=
-go.opentelemetry.io/collector/config/confignet v1.17.0 h1:cBmDdiPuIVrHiecgCKyXhRYmDOz9Do5IM7O1JhbB3es=
-go.opentelemetry.io/collector/config/confignet v1.17.0/go.mod h1:o3v4joAEjvLwntqexg5ixMqRrU1+Vst+jWuCUaBNgOg=
-go.opentelemetry.io/collector/config/configopaque v1.17.0 h1:wHhUgJhmDgNd6M7GW8IU5HjWi/pNmBEe9jBhavoR45g=
-go.opentelemetry.io/collector/config/configopaque v1.17.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
-go.opentelemetry.io/collector/config/configretry v1.17.0 h1:9GaiNKgUDx5by+A0aHKojw1BilHSK+8wq2LOmnynN00=
-go.opentelemetry.io/collector/config/configretry v1.17.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
-go.opentelemetry.io/collector/config/configtls v1.17.0 h1:5DPgmBgpKEopLGmkjaihZHVA/8yH0LGoOrUZlb86T0Q=
-go.opentelemetry.io/collector/config/configtls v1.17.0/go.mod h1:xUV5/xAHJbwrCuT2rGurBGSUqyFFAVVBcQ5DJAENeCc=
-go.opentelemetry.io/collector/config/internal v0.111.0 h1:HTrN9xCpX42xlyDskWbhA/2NkSjMasxNEuGkmjjq7Q8=
-go.opentelemetry.io/collector/config/internal v0.111.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
-go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM=
-go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k=
-go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0 h1:KH0ABOBfSPp5XZtHkoXeI9wKoOD9B0eN6TDo08SwN/c=
-go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0/go.mod h1:jyFbV9hLrYJf2zNjqcpzkzB6zmPj/Ohr+S+vmPuxyMY=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0 h1:UyMO2ddtO7GKuFjrkR51IxmeBuRJrb1KKatu60oosxI=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0/go.mod h1:SCJ8zvuuaOwQJk+zI87XSuc+HbquP2tsYb9aPlfeeRg=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0 h1:R/U0uWAyppNrxvF+piqhnhcrPSNz3wnwHyEIRCbrmh0=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0/go.mod h1:3mtUk7wwDQyPUsHtCOLi2v0uSZWfC00BhOhqHs4CWs4=
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.111.0 h1:uNMlftoTRqjavhoGY2LvUc4z0+lDht1UHrvj856skRU=
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.111.0/go.mod h1:1Vhweh5dDeTUOmcw5WSGHPgHUwZzouf3y2dQr4yFWjA=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0 h1:FtKwwHS8NSNJWrhE7JsFlYhe+2GojENfOQbhQMSTyRo=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0/go.mod h1:9/R8ucfVQEEEHMv9b7M6rSB8nF2k+MfIO93vbDEsaMU=
-go.opentelemetry.io/collector/connector v0.111.0 h1:dOaJRO27LyX4ZnkZA51namo2V5idRWvWoMVf4b7obro=
-go.opentelemetry.io/collector/connector v0.111.0/go.mod h1:gPwxA1SK+uraSTpX20MG/cNc+axhkBm8+B6z6hh6hYg=
-go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 h1:tJ4+hcWRhknw+cRw6d6dI4CyX3/puqnd1Rg9+mWdwHU=
-go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0/go.mod h1:LdfE8hNYcEb+fI5kZp4w3ZGlTLFAmvHAPtTZxS6TZ38=
-go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
-go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
-go.opentelemetry.io/collector/exporter v0.111.0 h1:NpiP6xXGOmSi59RlB5gGTB+PtCLldVeK3vCQBJPW0sU=
-go.opentelemetry.io/collector/exporter v0.111.0/go.mod h1:FjO80zGWZjqXil8vM1MS8gyxxzZ29WmChTNV2y9xjHo=
-go.opentelemetry.io/collector/exporter/debugexporter v0.111.0 h1:KiypGuW+JG1gV9l6pvSEIMKwn+MLJn0Ol62HMe5ytr4=
-go.opentelemetry.io/collector/exporter/debugexporter v0.111.0/go.mod h1:7ihw3KDcvrY5kXIRNxB64Pz6kguf5Q0x9mJAvbBLT5Y=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 h1:fpIRPzqsaEtbVip/wsU6h/GMGISo7UjiiYV61MOMEpQ=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0/go.mod h1:NGUTQd1fminFnw289fVQFN4dxdyedK4GTTrJUc9gCtw=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0 h1:eOyd1InTuymfIP4oMzJki28JjpGQzOEK6Y0YlI6pwgA=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0/go.mod h1:nOUveQ4KWFqlCA6b0L5DXMosZCcNtit8abEuLHwBaUM=
-go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0 h1:e7UHbu515LfkFKHdXdOvz0gQP6jXD+uuoKs1PRXHEw0=
-go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0/go.mod h1:0+TSVnAEw9hyF34b0eu36IFVLpAgpxOugAI2ZgNPX18=
-go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8=
-go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI=
-go.opentelemetry.io/collector/extension/auth v0.111.0 h1:V9DfnMsKdVfsQMeGR5H/nAYHlZnr1Td75kkJOKbCevk=
-go.opentelemetry.io/collector/extension/auth v0.111.0/go.mod h1:4O5JQqEdAWuq4giicIy6DKlgkKTC0qgVEJm44RhviZY=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 h1:kUJSFjm6IQ6nmcJlfSFPvcEO/XeOP9gJY0Qz9O98DKg=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0/go.mod h1:qQGvl8Kz2W8b7QywtE8GNqWJMDBo47cjoiIXYuE+/zM=
-go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 h1:Ps2/2TUbAkxgZu1YxSxDweZDLJx5x7CyNKCINZkLFtY=
-go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0/go.mod h1:q4kBSWsOX62hAp7si+Y0Y0ZXWyCpXjiRuWWz7IL/MDI=
-go.opentelemetry.io/collector/extension/zpagesextension v0.111.0 h1:X+YXkJ3kX8c3xN/Mfiqc/gKB7NaQnG4Cge9R60lKOyw=
-go.opentelemetry.io/collector/extension/zpagesextension v0.111.0/go.mod h1:v5u5Ots6HgbhKsvRXB+SF9cmVTgkUATNiejHbpsa0rY=
-go.opentelemetry.io/collector/featuregate v1.17.0 h1:vpfXyWe7DFqCsDArsR9rAKKtVpt72PKjzjeqPegViws=
-go.opentelemetry.io/collector/featuregate v1.17.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
-go.opentelemetry.io/collector/internal/globalgates v0.111.0 h1:pPf/U401i/bEJ8ucbYMyqOdkujyZ92Gbm6RFkJrDvBc=
-go.opentelemetry.io/collector/internal/globalgates v0.111.0/go.mod h1:HqIBKc8J5Vccn93gkN1uaVK42VbVsuVyjmo5b1MORZo=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8=
-go.opentelemetry.io/collector/otelcol v0.111.0 h1:RcS1/BDsEBGdI4YjosdElxYwsA2tTtiYEuWjEF0p8vk=
-go.opentelemetry.io/collector/otelcol v0.111.0/go.mod h1:B/ri/CwsW7zeLXkCcB3XtarxjJ80eIC+z8guGhFFpis=
-go.opentelemetry.io/collector/otelcol/otelcoltest v0.111.0 h1:kiXvbIR1K8Tcv10ffaA9MvcPoGpm6uitaXzfhDZnV3o=
-go.opentelemetry.io/collector/otelcol/otelcoltest v0.111.0/go.mod h1:7jwDuhMkglGVSyJT6CQ1vE7A6fjYTvbap7/QVl3P8kQ=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
-go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
-go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
-go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g=
-go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo=
-go.opentelemetry.io/collector/processor/batchprocessor v0.111.0 h1:JoBjX0LjmQ3n22o54sxAN9T6sgxumBLDqq0RElvYAVc=
-go.opentelemetry.io/collector/processor/batchprocessor v0.111.0/go.mod h1:8Dw89aInFh4dX3A0iyIcpbQ1A/8hVWtxjrJKyAOb9TQ=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ=
-go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6tq/2qvbMIKlhG3s=
-go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g=
-go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0 h1:JWg6F//9AH34KVL1RkRVpcyJpbzIWMtpCLxggeo3gsY=
-go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0/go.mod h1:FpiGrlkIhMh9gNzaw29m5zhSkRRruZnwB2RyGI0yCsw=
-go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0 h1:VsQ55DvHvjYop+wbpY6qCSF0cfoMNMZEd0pANa5l+9Y=
-go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0/go.mod h1:/zUX2GHa7CIeqGRl+hpQk3zQ1QCaUpBK42XGqrXAbzQ=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM=
-go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
-go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
-go.opentelemetry.io/collector/service v0.111.0 h1:6yGjjbZvlYbir+vzi/9ACF965m8i96ScPTjpVvki3ms=
-go.opentelemetry.io/collector/service v0.111.0/go.mod h1:tti8TAosPuRj51/bbrSvf6OIJoSyTkywEvTdY/fAuwY=
+go.opentelemetry.io/collector v0.113.0 h1:dBuo2/OKBhoMCR86W4fFJLXGQ0gJfKRmi65AZwFkU2I=
+go.opentelemetry.io/collector v0.113.0/go.mod h1:XbjD4Yw9LunLo3IJu3ZZytNZ0drEVznxw1Z14Ujlw3s=
+go.opentelemetry.io/collector/client v1.19.0 h1:TUal8WV1agTrZStgE7BJ8ZC0IHLGtrfgO9ogU9t1mv8=
+go.opentelemetry.io/collector/client v1.19.0/go.mod h1:jgiXMEM6l8L2QEyf2I/M47Zd8+G7e4z+6H8q5SkHOlQ=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0 h1:iNlUi+vDo7OqQwg4UOjfqqY3Xram4SkdcSZAyxt6QfE=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0/go.mod h1:NF/hVnP0kcsEqV9x14yMxN9GoehuOaVgMpO+s2KE+28=
+go.opentelemetry.io/collector/config/configauth v0.113.0 h1:CBz43fGpN41MwLdwe3mw/XVSIDvGRMT8aaaPuqKukTU=
+go.opentelemetry.io/collector/config/configauth v0.113.0/go.mod h1:Q8SlxrIvL3FJO51hXa4n9ARvox04lK8mmpjf4b3UNAU=
+go.opentelemetry.io/collector/config/configcompression v1.19.0 h1:bTSjTLhnPXX1NSFM6GzguEM/NBe8QUPsXHc9kMOAJzE=
+go.opentelemetry.io/collector/config/configcompression v1.19.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
+go.opentelemetry.io/collector/config/configgrpc v0.113.0 h1:rNbRd033JlIeU+TH+3bEt4OwRlEwrktWdf6V+VUJUPk=
+go.opentelemetry.io/collector/config/configgrpc v0.113.0/go.mod h1:InXxPUj1oxJ57Sl954d2tQxXTgVHhfppFYjMwGjQukg=
+go.opentelemetry.io/collector/config/confighttp v0.113.0 h1:a6iO0y1ZM5CPDvwbryzU+GpqAtAQ3eSfNseoAUogw7c=
+go.opentelemetry.io/collector/config/confighttp v0.113.0/go.mod h1:JZ9EwoiWMIrXt5v+d/q54TeUhPdAoLDimSEqTtddW6E=
+go.opentelemetry.io/collector/config/confignet v1.19.0 h1:gEDTd8zLx4pPpG5///XPRpbYUpvKsuQzDdM5IEULY9w=
+go.opentelemetry.io/collector/config/confignet v1.19.0/go.mod h1:o3v4joAEjvLwntqexg5ixMqRrU1+Vst+jWuCUaBNgOg=
+go.opentelemetry.io/collector/config/configopaque v1.19.0 h1:7uvntQeAAtqCaeiS2dDGrT1wLPhWvDlEsD3SliA/koQ=
+go.opentelemetry.io/collector/config/configopaque v1.19.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
+go.opentelemetry.io/collector/config/configretry v1.19.0 h1:DEg8PXpo4ahMYgMzZZUU2cPcDF4vqowZlvimJ/t9InY=
+go.opentelemetry.io/collector/config/configretry v1.19.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/config/configtls v1.19.0 h1:GQ/cF1hgNqHVBq2oSSrOFXxVCyMDyd5kq4R/RMEbL98=
+go.opentelemetry.io/collector/config/configtls v1.19.0/go.mod h1:1hyqnYB3JqEUlk1ME/s9HYz4oCRcxQCRxsJitFFT/cA=
+go.opentelemetry.io/collector/config/internal v0.113.0 h1:9RAzH8v7ItFT1npHpvP0SvUzBHcZDliCGRo9Spp6v7c=
+go.opentelemetry.io/collector/config/internal v0.113.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
+go.opentelemetry.io/collector/confmap v1.19.0 h1:TQ0lZpAKqgsE0EKk+u4JA+uBbPYeFRmWP3GH43w40CY=
+go.opentelemetry.io/collector/confmap v1.19.0/go.mod h1:GgNu1ElPGmLn9govqIfjaopvdspw4PJ9KeDtWC4E2Q4=
+go.opentelemetry.io/collector/confmap/provider/envprovider v1.19.0 h1:f8O/I5pVRN86Gx5mHekNx92S6fGdOS4VcooRJKWe6Bs=
+go.opentelemetry.io/collector/confmap/provider/envprovider v1.19.0/go.mod h1:AiaW5YW1LD0/WlZuc8eZuZPBH6PA9QqsiAYRX1iC6T0=
+go.opentelemetry.io/collector/confmap/provider/fileprovider v1.19.0 h1:TYwyk4ea3U+5MYcEjrzZAaonBcLlabQu8CZeB7ekAYY=
+go.opentelemetry.io/collector/confmap/provider/fileprovider v1.19.0/go.mod h1:i3mL4OSGI5JM0hnzHujhJK+LDlvO3XrJxBsuclfU/jY=
+go.opentelemetry.io/collector/confmap/provider/httpprovider v1.19.0 h1:a077jcs3DVtaVdmgmCk3x4rRYuTkIqMDsoUc+VICHZk=
+go.opentelemetry.io/collector/confmap/provider/httpprovider v1.19.0/go.mod h1:HjYkzhHbwUacv27nq0JLsslGpbtrXyyfU30Oc72AWLU=
+go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.19.0 h1:8LoQxjlduFQUEwYuHWnxEj0A+GcAtpv2qPpDJVz7A5E=
+go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.19.0/go.mod h1:Y8ErEl5m9+1AWzWcMn52PATH5dw50wuyyPMffK62RCI=
+go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.19.0 h1:oV66DKiEdAt8EMZqGSChK2iEOxjrVaWRhf4OqqmqjbM=
+go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.19.0/go.mod h1:jtNUdO6i1k38BG7vFst+d1jk/N+c419uVR8HB4J0VjI=
+go.opentelemetry.io/collector/connector v0.113.0 h1:ii+s1CjsLxtglqRlFs6tv8UU/uX45dyN9lbTRbR0p8g=
+go.opentelemetry.io/collector/connector v0.113.0/go.mod h1:KmA8eQouTVxVk65Bf6utuMjWovMuOvNVRcYV60CAGtc=
+go.opentelemetry.io/collector/connector/connectorprofiles v0.113.0 h1:yAEKTxVGpBtHrrXeZFrBcVOQkduxCncH0o4hqKrDRyw=
+go.opentelemetry.io/collector/connector/connectorprofiles v0.113.0/go.mod h1:+mwzwIZ1cEK29ByfC38uF8hmFO8Wf9ShT1c756XX+RI=
+go.opentelemetry.io/collector/connector/connectortest v0.113.0 h1:WHekoL0izkrKLVQLv79v0QhqfnXkVcw0sgdF07EqWLM=
+go.opentelemetry.io/collector/connector/connectortest v0.113.0/go.mod h1:KouywNfkxRf+yzbI2pdolzTLkLoCV4ASEI2o2pDt+Cg=
+go.opentelemetry.io/collector/consumer v0.113.0 h1:KJSiK5vSIY9dgPxwKfQ3gOgKtQsqc+7IB7mGhUAL5c8=
+go.opentelemetry.io/collector/consumer v0.113.0/go.mod h1:zHMlXYFaJlZoLCBR6UwWoyXZ/adcO1u2ydqUal3VmYU=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0 h1:Hd2N7n9RKbnKRaVrdw6fPBoQko5zZIgCxwVxkL6SAIE=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0/go.mod h1:o0MAGFdzcr7LFTUQ6iivPPhbVmn2ZVIYm3FPXk2+JUo=
+go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.113.0 h1:2kLIt+6dGmhCd48CWXh3IEon/uW4+c8y81IGCA/h8wE=
+go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.113.0/go.mod h1:/eESy7Ifyf7G6r6WUpEOq2tnfjIJ2QNB2EvZcEu0aWA=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 h1:RftAcQUY5UOfbEK4s16jnORqTx16y9+PxA1lQwt98cQ=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0/go.mod h1:ZuHrQ4pWguh6dw0DgTfcUtdY/T+cnOJJNP6LMbm5Y5A=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0 h1:ua2AjNx3DUA8qElXNkggB4w3VDL/rBKBvryOQkhumH8=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0/go.mod h1:vK8o4ZTZSiG3rVyqxZcCNmT/cvEfx34ig7V65L9+6Rg=
+go.opentelemetry.io/collector/exporter v0.113.0 h1:lDZJ6xfuhyLsT/7lqLhIN/ftA6G+9fuYFtubPFvNDxo=
+go.opentelemetry.io/collector/exporter v0.113.0/go.mod h1:0W4NBf5NjWYxR8oJodmOybgN4O0MLazdJwwHevirvXg=
+go.opentelemetry.io/collector/exporter/debugexporter v0.113.0 h1:iShn3SuSpx78YLgl7fQCJsHLs7z0RDtbN58/Amoy5xc=
+go.opentelemetry.io/collector/exporter/debugexporter v0.113.0/go.mod h1:O1dLnYA81a+ZecBD89vjZSBgkAnhnfXwsmYsE7LP2/s=
+go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.113.0 h1:Auz2vZYReIlyDvJ162OCO8XcV7L2BIbFb5HJWxerc5A=
+go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.113.0/go.mod h1:JQuawcAfDuzNneDF5Ep1CZJ5snsLp6Bh1gZcHhja7yU=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 h1:8bsk3wYYNr+WAM5nZkFjiLYSTH9MsY2tm7nUpMWt3qc=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0/go.mod h1:/HFWF846XePYL/qKDtcEAFgkiGSkLUTaC59A5F48axM=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0 h1:U6cRxjJS7td8iNriUI2QfEdH+Yj60ytyvpmnmKTw0+8=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0/go.mod h1:SRz5jGyAjtNiWwJ93B1+Ndk1p3oFtQsyLw52UGeyRwc=
+go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0 h1://7diunG5SohqaYfqvHzCtcfrY7y3WQj0vklFYgeNW4=
+go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0/go.mod h1:THF0eq4lA6dYOho53iKFCBOv91HEeISZyep5dXr+fBU=
+go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0 h1:22Srn4V6ogOdi4Bn6eKtKqAidWyjPkYKYDR3Xq91nFY=
+go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0/go.mod h1:BRA54WRyPS9RYDIUEGxxJvxJ/uZ66++bCFPHliDstCQ=
+go.opentelemetry.io/collector/extension v0.113.0 h1:Vp/YSL8ZCkJQrP1lf2Bm5yaTvcp6ROO3AnfuSL3GEXM=
+go.opentelemetry.io/collector/extension v0.113.0/go.mod h1:Pwp0TNqdHeER4V1I6H6oCvrto/riiOAqs3737BWCnjw=
+go.opentelemetry.io/collector/extension/auth v0.113.0 h1:4ggRy1vepOabUiCWfU+6M9P/ftXojMUNAvBpeLihYj8=
+go.opentelemetry.io/collector/extension/auth v0.113.0/go.mod h1:VbvAm2YZAqePkWgwn0m0vBaq3aC49CxPVwHmrJ24aeQ=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 h1:Qq4IaB6bMUrf/bWoPZ5ESWywCt+vDi8I/ChYejIEPcc=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0/go.mod h1:BRmo+A7f06u/rhyLauU/Vogk+QRN0y1j2VVVgMGWrfQ=
+go.opentelemetry.io/collector/extension/extensioncapabilities v0.113.0 h1:GuJzpnrJtsMrKWGmb1VL4EqL6x1HDtZmtvy3yEjth6Y=
+go.opentelemetry.io/collector/extension/extensioncapabilities v0.113.0/go.mod h1:oa72qndu7nAfEeEpDyDi9qLcaSJGIscLc/eeojFADx0=
+go.opentelemetry.io/collector/extension/zpagesextension v0.113.0 h1:b/Clxso9uVwLVYjvRQ1NGBWHpUEZ/++uA5sJbBj0ryo=
+go.opentelemetry.io/collector/extension/zpagesextension v0.113.0/go.mod h1:5csGYy9Ydfy6Hpw3Tod864P6HUEZpA6UiuPJPG3TjSU=
+go.opentelemetry.io/collector/featuregate v1.19.0 h1:ASea2sU+tdpKI3RxIJC/pufDAfwAmrvcQ4EmTHVu0B0=
+go.opentelemetry.io/collector/featuregate v1.19.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
+go.opentelemetry.io/collector/internal/fanoutconsumer v0.113.0 h1:Beu2zAN6/EDXQ6hMFU6FT1BsnU5FXmWNOlfTAhrgbGc=
+go.opentelemetry.io/collector/internal/fanoutconsumer v0.113.0/go.mod h1:WUXbc4L6KJ3SpmsxBgId0OYzRDuS7n274kNpqrgnSmY=
+go.opentelemetry.io/collector/internal/sharedcomponent v0.113.0 h1:a4gT+t+rboCaH70anhu+ZQp9IJ7UjVeZxZJvxTBgCqU=
+go.opentelemetry.io/collector/internal/sharedcomponent v0.113.0/go.mod h1:6WDDyjI4pbtfUmtv/JKLs7OwieEEvaDVb3Zcc4oA9Vg=
+go.opentelemetry.io/collector/otelcol v0.113.0 h1:t32gA8Pg9lsqYQml4mgvHdqFSsjvPZMvGywi0zXQYjk=
+go.opentelemetry.io/collector/otelcol v0.113.0/go.mod h1:PV6pDLPEaulRs3ceWYNEDuG5100F35I5VzeC2ekT/vY=
+go.opentelemetry.io/collector/otelcol/otelcoltest v0.113.0 h1:bfu9oQQbO6KEcpgh7muc1ixsGQs+qFWwi9LyabGILqw=
+go.opentelemetry.io/collector/otelcol/otelcoltest v0.113.0/go.mod h1:0bag/J2REbnIKKKHvYe0RqyjmsUv4OJH14kNef+lD4Q=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0 h1:VRf4p0VhfuaR+Epy/nMIlu/9t39WU9CUgHVUvpuGxfU=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0/go.mod h1:5aDejksdXh5PdJN/OhpzATGT3kbNL0RMmw2Q0Q6E/o0=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0 h1:vRfn85jicO2F4eOTgsWtzmU/K3E/uZUtM1HEefvvJD8=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0/go.mod h1:sR+6eR+YEJhYZu9StbqzeWcCmHpfBAgX/qjP82HY9Gw=
+go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uFxcm2VO+vhad/eE=
+go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg=
+go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0 h1:PwQnErsLvEd1x6VIyjLmKQot9huKWqIfEz1kd+8aj4k=
+go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0/go.mod h1:tChJYsCG3wc6JPT9aJO3y+32V14NhmCFZOh3k5ORGdQ=
+go.opentelemetry.io/collector/processor v0.113.0 h1:BQI6MsKtiCG9HT/nmiRXTKP6SZFrjFKVfM6pTQfbc0k=
+go.opentelemetry.io/collector/processor v0.113.0/go.mod h1:oX91zMI8ZkoaYSUfUYflHiMiiBJPKtODNBUCrETLLd8=
+go.opentelemetry.io/collector/processor/batchprocessor v0.113.0 h1:LPNbVILg+cKTFIi8ziIa2idZ5MRlBIf4Wr72gZNgXb0=
+go.opentelemetry.io/collector/processor/batchprocessor v0.113.0/go.mod h1:tCg+B/1idJS5inxod+nRPXFdVi89Bsnl6RvzIOO9k5I=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE=
+go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA=
+go.opentelemetry.io/collector/processor/processortest v0.113.0/go.mod h1:tNg04r5KlpVx774jSC8U/oYst1eb7WIiz+0AjXKJ0Uw=
+go.opentelemetry.io/collector/receiver v0.113.0 h1:vraAbkPy8Pz9x5X39gV+j9t6x23PNsY2aJ6gQMugRbQ=
+go.opentelemetry.io/collector/receiver v0.113.0/go.mod h1:IUa8/lNw8Qh4L5Q3jOeRWKW0ebQPoNcfhytxN5Puq2A=
+go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0 h1:Kgan6/DCH1YZzOztXPPair+V2czPmrJxxrIIxLVYmn4=
+go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0/go.mod h1:1nVoRLC/antEw4gvcyaRBT3aBt7nh3KBASWLLhmm0Ts=
+go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0 h1:yhnj8kmh1IQ4g6fIWvhum/wYPUU2WmRpQuy1iSvf4e4=
+go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0/go.mod h1:3OB+oJlOb1rlLLdBwxae4g2Qh5C97Eg17HVveIddUCw=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 h1:uVxuzjGe2t1sbwahSBowVHYnGzpzn8brmfn8z1UHvQg=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0/go.mod h1:khKDkzYJR2x2OPUqGSmoSncdINT9lUE5IThiHPDbqZk=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0 h1:0vOvz3S4Q/KwcNCS9C7zPo0uxD6RSWktG88yGdxfV6g=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0/go.mod h1:sRq5ctm5UE/0Ar562wnCVQ1zbAie/D127D1WbtbEuEc=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/collector/service v0.113.0 h1:SFT+kWuj9TehC34MLEWfXH6QocGl3MYqLJ7UbxZtWzM=
+go.opentelemetry.io/collector/service v0.113.0/go.mod h1:6+JY80Yd4J4RWpvRmpCUUZFOZKGVs9a1QKCKPlDrKfs=
+go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 h1:j8icMXyyqNf6HGuwlYhniPnVsbJIq7n+WirDu3VAJdQ=
+go.opentelemetry.io/contrib/bridges/otelzap v0.6.0/go.mod h1:evIOZpl+kAlU5IsaYX2Siw+IbpacAZvXemVsgt70uvw=
go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c=
go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 h1:hCq2hNMwsegUvPzI7sPOvtO9cqyy5GbWt/Ybp2xrx8Q=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0/go.mod h1:LqaApwGx/oUmzsbqxkzuBvyoPpkxk3JQWnqfVrJ3wCA=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
-go.opentelemetry.io/contrib/propagators/b3 v1.30.0 h1:vumy4r1KMyaoQRltX7cJ37p3nluzALX9nugCjNNefuY=
-go.opentelemetry.io/contrib/propagators/b3 v1.30.0/go.mod h1:fRbvRsaeVZ82LIl3u0rIvusIel2UUf+JcaaIpy5taho=
-go.opentelemetry.io/contrib/zpages v0.55.0 h1:F+xj261Ulwl79QC+2O+IO1b3NbwppUDwN+7LbDSdQcY=
-go.opentelemetry.io/contrib/zpages v0.55.0/go.mod h1:dDqDGDfbXSjt/k9orZk4Huulvz1letX1YWTKts5GQpo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
+go.opentelemetry.io/contrib/propagators/b3 v1.31.0 h1:PQPXYscmwbCp76QDvO4hMngF2j8Bx/OTV86laEl8uqo=
+go.opentelemetry.io/contrib/propagators/b3 v1.31.0/go.mod h1:jbqfV8wDdqSDrAYxVpXQnpM0XFMq2FtDesblJ7blOwQ=
+go.opentelemetry.io/contrib/zpages v0.56.0 h1:W7vP6s3juzL5KiHpr41zLNmsJ0QAZudYu8ay0zGAoko=
+go.opentelemetry.io/contrib/zpages v0.56.0/go.mod h1:IxPRP4TYHw9jLeaEOSDIiA9zmyJNZNO6sbW55iMvSXs=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 h1:QSKmLBzbFULSyHzOdO9JsN9lpE4zkrz1byYGmJecdVE=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0/go.mod h1:sTQ/NH8Yrirf0sJ5rWqVu+oT82i4zL9FaF6rWcqnptM=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 h1:WypxHH02KX2poqqbaadmkMYalGyy/vil4HE4PM4nRJc=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0/go.mod h1:U79SV99vtvGSEBeeHnpgGJfTsnsdkWLpPN/CcHAzBSI=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 h1:VrMAbeJz4gnVDg2zEzjHG4dEH86j4jO6VYB+NgtGD8s=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0/go.mod h1:qqN/uFdpeitTvm+JDqqnjm517pmQRYxTORbETHq5tOc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8=
-go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ=
-go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 h1:bZHOb8k/CwwSt0DgvgaoOhBXWNdWqFWaIsGTtg1H3KE=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0/go.mod h1:XlV163j81kDdIt5b5BXCjdqVfqJFy/LJrHA697SorvQ=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 h1:IyFlqNsi8VT/nwYlLJfdM0y1gavxGpEvnf6FtVfZ6X4=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0/go.mod h1:bxiX8eUeKoAEQmbq/ecUT8UqZwCjZW52yJrXJUSozsk=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 h1:kn1BudCgwtE7PxLqcZkErpD8GKqLZ6BSzeW9QihQJeM=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0/go.mod h1:ljkUDtAMdleoi9tIG1R6dJUpVwDcYjw3J2Q6Q/SuiC0=
-go.opentelemetry.io/otel/log v0.6.0 h1:nH66tr+dmEgW5y+F9LanGJUBYPrRgP4g2EkmPE3LeK8=
-go.opentelemetry.io/otel/log v0.6.0/go.mod h1:KdySypjQHhP069JX0z/t26VHwa8vSwzgaKmXtIB3fJM=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 h1:mMOmtYie9Fx6TSVzw4W+NTpvoaS1JWWga37oI1a/4qQ=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0/go.mod h1:yy7nDsMMBUkD+jeekJ36ur5f3jJIrmCwUrY67VFhNpA=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 h1:FZ6ei8GFW7kyPYdxJaV2rgI6M+4tvZzhYsQ2wgyVC08=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0/go.mod h1:MdEu/mC6j3D+tTEfvI15b5Ci2Fn7NneJ71YMoiS3tpI=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 h1:ZsXq73BERAiNuuFXYqP4MR5hBrjXfMGSO+Cx7qoOZiM=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0/go.mod h1:hg1zaDMpyZJuUzjFxFsRYBoccE86tM9Uf4IqNMUxvrY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
+go.opentelemetry.io/otel/exporters/prometheus v0.53.0 h1:QXobPHrwiGLM4ufrY3EOmDPJpo2P90UuFau4CDPJA/I=
+go.opentelemetry.io/otel/exporters/prometheus v0.53.0/go.mod h1:WOAXGr3D00CfzmFxtTV1eR0GpoHuPEu+HJT8UWW2SIU=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 h1:TwmL3O3fRR80m8EshBrd8YydEZMcUCsZXzOUlnFohwM=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0/go.mod h1:tH98dDv5KPmPThswbXA0fr0Lwfs+OhK8HgaCo7PjRrk=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0 h1:HZgBIps9wH0RDrwjrmNa3DVbNRW60HEhdzqZFyAp3fI=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0/go.mod h1:RDRhvt6TDG0eIXmonAx5bd9IcwpqCkziwkOClzWKwAQ=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64=
+go.opentelemetry.io/otel/log v0.7.0 h1:d1abJc0b1QQZADKvfe9JqqrfmPYQCz2tUSO+0XZmuV4=
+go.opentelemetry.io/otel/log v0.7.0/go.mod h1:2jf2z7uVfnzDNknKTO9G+ahcOAyWcp1fJmk/wJjULRo=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
-go.opentelemetry.io/otel/sdk/log v0.6.0 h1:4J8BwXY4EeDE9Mowg+CyhWVBhTSLXVXodiXxS/+PGqI=
-go.opentelemetry.io/otel/sdk/log v0.6.0/go.mod h1:L1DN8RMAduKkrwRAFDEX3E3TLOq46+XMGSbUfHU/+vE=
+go.opentelemetry.io/otel/sdk/log v0.7.0 h1:dXkeI2S0MLc5g0/AwxTZv6EUEjctiH8aG14Am56NTmQ=
+go.opentelemetry.io/otel/sdk/log v0.7.0/go.mod h1:oIRXpW+WD6M8BuGj5rtS0aRu/86cbDV/dAfNaZBIjYM=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
@@ -1348,10 +1366,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg=
+google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1381,8 +1399,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1423,12 +1441,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
-k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
-k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
-k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
-k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
-k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
+k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
+k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
+k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
+k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
+k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
diff --git a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml
index c2bba3e8c5c3f..dd7b829875fc5 100644
--- a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml
+++ b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml
@@ -234,6 +234,7 @@ service:
level: info
output_paths:
- stderr
+ processors: []
sampling:
enabled: true
initial: 10
diff --git a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml
index 8bf70b012b472..02dbe2cbcdf4d 100644
--- a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml
+++ b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml
@@ -167,6 +167,7 @@ service:
level: info
output_paths:
- stderr
+ processors: []
sampling:
enabled: true
initial: 10
diff --git a/comp/otelcol/logsagentpipeline/go.mod b/comp/otelcol/logsagentpipeline/go.mod
index 1859b4075ba35..38c20efb7ef24 100644
--- a/comp/otelcol/logsagentpipeline/go.mod
+++ b/comp/otelcol/logsagentpipeline/go.mod
@@ -55,6 +55,7 @@ replace (
github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system
github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../pkg/util/system/socket
github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../pkg/util/testutil
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ../../../pkg/util/utilizationtracker
github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../pkg/util/winutil
github.com/DataDog/datadog-agent/pkg/version => ../../../pkg/version
)
@@ -103,6 +104,7 @@ require (
github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.1 // indirect
github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
diff --git a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod
index a39b31a1e35e8..3a7f3981ce689 100644
--- a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod
+++ b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod
@@ -56,6 +56,7 @@ replace (
github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system
github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../pkg/util/system/socket
github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../pkg/util/testutil
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ../../../../pkg/util/utilizationtracker
github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil
github.com/DataDog/datadog-agent/pkg/version => ../../../../pkg/version
)
@@ -118,6 +119,7 @@ require (
github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/system v0.57.1 // indirect
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.1 // indirect
github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
diff --git a/comp/otelcol/otlp/collector.go b/comp/otelcol/otlp/collector.go
index ce7e98d44c7c0..e3835151c1dca 100644
--- a/comp/otelcol/otlp/collector.go
+++ b/comp/otelcol/otlp/collector.go
@@ -31,8 +31,8 @@ import (
otlpmetrics "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics"
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
"github.com/DataDog/datadog-agent/comp/core/tagger/common"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util"
"github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter"
diff --git a/comp/otelcol/otlp/collector_test.go b/comp/otelcol/otlp/collector_test.go
index 1e44b6e1f79c6..6134d8a6a9a47 100644
--- a/comp/otelcol/otlp/collector_test.go
+++ b/comp/otelcol/otlp/collector_test.go
@@ -16,7 +16,7 @@ import (
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/otelcol"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/logs/message"
@@ -24,7 +24,7 @@ import (
)
func TestGetComponents(t *testing.T) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
_, err := getComponents(serializermock.NewMetricSerializer(t), make(chan *message.Message), fakeTagger)
// No duplicate component
@@ -32,7 +32,7 @@ func TestGetComponents(t *testing.T) {
}
func AssertSucessfulRun(t *testing.T, pcfg PipelineConfig) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
p, err := NewPipeline(pcfg, serializermock.NewMetricSerializer(t), make(chan *message.Message), fakeTagger)
require.NoError(t, err)
@@ -59,7 +59,7 @@ func AssertSucessfulRun(t *testing.T, pcfg PipelineConfig) {
}
func AssertFailedRun(t *testing.T, pcfg PipelineConfig, expected string) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
p, err := NewPipeline(pcfg, serializermock.NewMetricSerializer(t), make(chan *message.Message), fakeTagger)
require.NoError(t, err)
diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/factory.go b/comp/otelcol/otlp/components/exporter/datadogexporter/factory.go
index a75d48585168b..2b4b5e85e7b77 100644
--- a/comp/otelcol/otlp/components/exporter/datadogexporter/factory.go
+++ b/comp/otelcol/otlp/components/exporter/datadogexporter/factory.go
@@ -211,7 +211,7 @@ func (f *factory) createTracesExporter(
tracex := newTracesExporter(ctx, set, cfg, f.traceagentcmp)
- return exporterhelper.NewTracesExporter(
+ return exporterhelper.NewTraces(
ctx,
set,
cfg,
@@ -246,7 +246,7 @@ func (f *factory) createMetricsExporter(
},
QueueConfig: cfg.QueueConfig,
}
- return sf.CreateMetricsExporter(ctx, set, ex)
+ return sf.CreateMetrics(ctx, set, ex)
}
func (f *factory) consumeStatsPayload(ctx context.Context, wg *sync.WaitGroup, statsIn <-chan []byte, tracerVersion string, agentVersion string, logger *zap.Logger) {
@@ -295,5 +295,5 @@ func (f *factory) createLogsExporter(
OtelSource: "otel_agent",
LogSourceName: logsagentexporter.LogSourceName,
}
- return lf.CreateLogsExporter(ctx, set, lc)
+ return lf.CreateLogs(ctx, set, lc)
}
diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod
index 9254ddbb724e6..c87aea4ec1514 100644
--- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod
+++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod
@@ -86,6 +86,7 @@ replace (
github.com/DataDog/datadog-agent/pkg/util/system => ../../../../../../pkg/util/system/
github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../../../pkg/util/system/socket/
github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../../../pkg/util/testutil/
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ../../../../../../pkg/util/utilizationtracker/
github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../../../pkg/util/winutil/
github.com/DataDog/datadog-agent/pkg/version => ../../../../../../pkg/version
)
@@ -106,22 +107,29 @@ require (
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.21.0
github.com/stretchr/testify v1.9.0
- go.opentelemetry.io/collector/component v0.111.0
- go.opentelemetry.io/collector/config/configauth v0.111.0
- go.opentelemetry.io/collector/config/confighttp v0.111.0
- go.opentelemetry.io/collector/config/confignet v0.104.0
- go.opentelemetry.io/collector/config/configopaque v1.17.0
- go.opentelemetry.io/collector/config/configretry v1.17.0
- go.opentelemetry.io/collector/config/configtls v1.17.0
- go.opentelemetry.io/collector/confmap v1.17.0
- go.opentelemetry.io/collector/consumer v0.111.0
- go.opentelemetry.io/collector/exporter v0.111.0
- go.opentelemetry.io/collector/featuregate v1.11.0
- go.opentelemetry.io/collector/pdata v1.17.0
+ go.opentelemetry.io/collector/component v0.113.0
+ go.opentelemetry.io/collector/config/configauth v0.113.0
+ go.opentelemetry.io/collector/config/confighttp v0.113.0
+ go.opentelemetry.io/collector/config/confignet v1.19.0
+ go.opentelemetry.io/collector/config/configopaque v1.19.0
+ go.opentelemetry.io/collector/config/configretry v1.19.0
+ go.opentelemetry.io/collector/config/configtls v1.19.0
+ go.opentelemetry.io/collector/confmap v1.19.0
+ go.opentelemetry.io/collector/consumer v0.113.0
+ go.opentelemetry.io/collector/exporter v0.113.0
+ go.opentelemetry.io/collector/exporter/exportertest v0.113.0
+ go.opentelemetry.io/collector/featuregate v1.19.0
+ go.opentelemetry.io/collector/pdata v1.19.0
go.opentelemetry.io/otel/metric v1.31.0
go.opentelemetry.io/otel/trace v1.31.0
go.uber.org/zap v1.27.0
- google.golang.org/protobuf v1.34.2
+ google.golang.org/protobuf v1.35.1
+)
+
+require (
+ github.com/pierrec/lz4/v4 v4.1.21 // indirect
+ go.opentelemetry.io/collector/consumer/consumererror v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receivertest v0.113.0 // indirect
)
require (
@@ -189,6 +197,7 @@ require (
github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/system v0.57.1 // indirect
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.1 // indirect
github.com/DataDog/datadog-agent/pkg/version v0.57.1 // indirect
github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect
@@ -216,11 +225,11 @@ require (
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fatih/color v1.17.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
- github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
@@ -237,7 +246,7 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
github.com/karrick/godirwalk v1.17.0 // indirect
- github.com/klauspost/compress v1.17.10 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
github.com/knadh/koanf/maps v0.1.1 // indirect
github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
github.com/knadh/koanf/v2 v2.1.1 // indirect
@@ -281,23 +290,22 @@ require (
github.com/tklauser/numcpus v0.8.0 // indirect
github.com/twmb/murmur3 v1.1.8 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- go.opentelemetry.io/collector/client v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configcompression v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
- go.opentelemetry.io/collector/config/internal v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect
- go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/extension v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/auth v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect
- go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.111.0 // indirect
- go.opentelemetry.io/collector/receiver v0.111.0 // indirect
- go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/semconv v0.111.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect
+ go.opentelemetry.io/collector/client v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configcompression v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/internal v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/auth v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/pprofile v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.113.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect
go.opentelemetry.io/otel v1.31.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect
go.opentelemetry.io/otel/sdk v1.31.0 // indirect
diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum b/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum
index ee08debbb0174..3d185cd732f6b 100644
--- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum
+++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum
@@ -98,8 +98,8 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -113,8 +113,8 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
-github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -191,8 +191,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
-github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs=
github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
@@ -263,6 +263,8 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -383,72 +385,78 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.opentelemetry.io/collector/client v1.17.0 h1:eJB4r4nPY0WrQ6IQEEbOPCOfQU7N15yzZud9y5fKfms=
-go.opentelemetry.io/collector/client v1.17.0/go.mod h1:egG3tOG68zvC04hgl6cW2H/oWCUCCdDWtL4WpbcSUys=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY=
-go.opentelemetry.io/collector/config/configauth v0.111.0 h1:0CcgX4TzK5iu2YtryIu3al8lNI+9fqjbGoyvAFk9ZCw=
-go.opentelemetry.io/collector/config/configauth v0.111.0/go.mod h1:5oyYNL3gnYMYNdNsEjFvA2Tdc1yjG8L+HQFIjPo6kK8=
-go.opentelemetry.io/collector/config/configcompression v1.17.0 h1:5CzLHTPOgHaKod1ZQLYs0o7GZDBhdsLQRm8Lcbo79vU=
-go.opentelemetry.io/collector/config/configcompression v1.17.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
-go.opentelemetry.io/collector/config/confighttp v0.111.0 h1:nZJFHKYYeCasyhhFC71iZf6GAs6pfFcNOga6b8+lFvc=
-go.opentelemetry.io/collector/config/confighttp v0.111.0/go.mod h1:heE5JjcLDiH8fMULf55QL2oI9+8Ct58Vq/QfP7TV684=
-go.opentelemetry.io/collector/config/confignet v0.104.0 h1:i7AOTJf4EQox3SEt1YtQFQR+BwXr3v5D9x3Ai9/ovy8=
-go.opentelemetry.io/collector/config/confignet v0.104.0/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E=
-go.opentelemetry.io/collector/config/configopaque v1.17.0 h1:wHhUgJhmDgNd6M7GW8IU5HjWi/pNmBEe9jBhavoR45g=
-go.opentelemetry.io/collector/config/configopaque v1.17.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
-go.opentelemetry.io/collector/config/configretry v1.17.0 h1:9GaiNKgUDx5by+A0aHKojw1BilHSK+8wq2LOmnynN00=
-go.opentelemetry.io/collector/config/configretry v1.17.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
-go.opentelemetry.io/collector/config/configtls v1.17.0 h1:5DPgmBgpKEopLGmkjaihZHVA/8yH0LGoOrUZlb86T0Q=
-go.opentelemetry.io/collector/config/configtls v1.17.0/go.mod h1:xUV5/xAHJbwrCuT2rGurBGSUqyFFAVVBcQ5DJAENeCc=
-go.opentelemetry.io/collector/config/internal v0.111.0 h1:HTrN9xCpX42xlyDskWbhA/2NkSjMasxNEuGkmjjq7Q8=
-go.opentelemetry.io/collector/config/internal v0.111.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
-go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM=
-go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k=
-go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
-go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
-go.opentelemetry.io/collector/exporter v0.111.0 h1:NpiP6xXGOmSi59RlB5gGTB+PtCLldVeK3vCQBJPW0sU=
-go.opentelemetry.io/collector/exporter v0.111.0/go.mod h1:FjO80zGWZjqXil8vM1MS8gyxxzZ29WmChTNV2y9xjHo=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 h1:fpIRPzqsaEtbVip/wsU6h/GMGISo7UjiiYV61MOMEpQ=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0/go.mod h1:NGUTQd1fminFnw289fVQFN4dxdyedK4GTTrJUc9gCtw=
-go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8=
-go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI=
-go.opentelemetry.io/collector/extension/auth v0.111.0 h1:V9DfnMsKdVfsQMeGR5H/nAYHlZnr1Td75kkJOKbCevk=
-go.opentelemetry.io/collector/extension/auth v0.111.0/go.mod h1:4O5JQqEdAWuq4giicIy6DKlgkKTC0qgVEJm44RhviZY=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 h1:kUJSFjm6IQ6nmcJlfSFPvcEO/XeOP9gJY0Qz9O98DKg=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0/go.mod h1:qQGvl8Kz2W8b7QywtE8GNqWJMDBo47cjoiIXYuE+/zM=
-go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY=
-go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
-go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
-go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
-go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g=
-go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ=
-go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6tq/2qvbMIKlhG3s=
-go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM=
-go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
-go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
+go.opentelemetry.io/collector/client v1.19.0 h1:TUal8WV1agTrZStgE7BJ8ZC0IHLGtrfgO9ogU9t1mv8=
+go.opentelemetry.io/collector/client v1.19.0/go.mod h1:jgiXMEM6l8L2QEyf2I/M47Zd8+G7e4z+6H8q5SkHOlQ=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0 h1:iNlUi+vDo7OqQwg4UOjfqqY3Xram4SkdcSZAyxt6QfE=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0/go.mod h1:NF/hVnP0kcsEqV9x14yMxN9GoehuOaVgMpO+s2KE+28=
+go.opentelemetry.io/collector/config/configauth v0.113.0 h1:CBz43fGpN41MwLdwe3mw/XVSIDvGRMT8aaaPuqKukTU=
+go.opentelemetry.io/collector/config/configauth v0.113.0/go.mod h1:Q8SlxrIvL3FJO51hXa4n9ARvox04lK8mmpjf4b3UNAU=
+go.opentelemetry.io/collector/config/configcompression v1.19.0 h1:bTSjTLhnPXX1NSFM6GzguEM/NBe8QUPsXHc9kMOAJzE=
+go.opentelemetry.io/collector/config/configcompression v1.19.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
+go.opentelemetry.io/collector/config/confighttp v0.113.0 h1:a6iO0y1ZM5CPDvwbryzU+GpqAtAQ3eSfNseoAUogw7c=
+go.opentelemetry.io/collector/config/confighttp v0.113.0/go.mod h1:JZ9EwoiWMIrXt5v+d/q54TeUhPdAoLDimSEqTtddW6E=
+go.opentelemetry.io/collector/config/confignet v1.19.0 h1:gEDTd8zLx4pPpG5///XPRpbYUpvKsuQzDdM5IEULY9w=
+go.opentelemetry.io/collector/config/confignet v1.19.0/go.mod h1:o3v4joAEjvLwntqexg5ixMqRrU1+Vst+jWuCUaBNgOg=
+go.opentelemetry.io/collector/config/configopaque v1.19.0 h1:7uvntQeAAtqCaeiS2dDGrT1wLPhWvDlEsD3SliA/koQ=
+go.opentelemetry.io/collector/config/configopaque v1.19.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
+go.opentelemetry.io/collector/config/configretry v1.19.0 h1:DEg8PXpo4ahMYgMzZZUU2cPcDF4vqowZlvimJ/t9InY=
+go.opentelemetry.io/collector/config/configretry v1.19.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/config/configtls v1.19.0 h1:GQ/cF1hgNqHVBq2oSSrOFXxVCyMDyd5kq4R/RMEbL98=
+go.opentelemetry.io/collector/config/configtls v1.19.0/go.mod h1:1hyqnYB3JqEUlk1ME/s9HYz4oCRcxQCRxsJitFFT/cA=
+go.opentelemetry.io/collector/config/internal v0.113.0 h1:9RAzH8v7ItFT1npHpvP0SvUzBHcZDliCGRo9Spp6v7c=
+go.opentelemetry.io/collector/config/internal v0.113.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
+go.opentelemetry.io/collector/confmap v1.19.0 h1:TQ0lZpAKqgsE0EKk+u4JA+uBbPYeFRmWP3GH43w40CY=
+go.opentelemetry.io/collector/confmap v1.19.0/go.mod h1:GgNu1ElPGmLn9govqIfjaopvdspw4PJ9KeDtWC4E2Q4=
+go.opentelemetry.io/collector/consumer v0.113.0 h1:KJSiK5vSIY9dgPxwKfQ3gOgKtQsqc+7IB7mGhUAL5c8=
+go.opentelemetry.io/collector/consumer v0.113.0/go.mod h1:zHMlXYFaJlZoLCBR6UwWoyXZ/adcO1u2ydqUal3VmYU=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0 h1:Hd2N7n9RKbnKRaVrdw6fPBoQko5zZIgCxwVxkL6SAIE=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0/go.mod h1:o0MAGFdzcr7LFTUQ6iivPPhbVmn2ZVIYm3FPXk2+JUo=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 h1:RftAcQUY5UOfbEK4s16jnORqTx16y9+PxA1lQwt98cQ=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0/go.mod h1:ZuHrQ4pWguh6dw0DgTfcUtdY/T+cnOJJNP6LMbm5Y5A=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0 h1:ua2AjNx3DUA8qElXNkggB4w3VDL/rBKBvryOQkhumH8=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0/go.mod h1:vK8o4ZTZSiG3rVyqxZcCNmT/cvEfx34ig7V65L9+6Rg=
+go.opentelemetry.io/collector/exporter v0.113.0 h1:lDZJ6xfuhyLsT/7lqLhIN/ftA6G+9fuYFtubPFvNDxo=
+go.opentelemetry.io/collector/exporter v0.113.0/go.mod h1:0W4NBf5NjWYxR8oJodmOybgN4O0MLazdJwwHevirvXg=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 h1:8bsk3wYYNr+WAM5nZkFjiLYSTH9MsY2tm7nUpMWt3qc=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0/go.mod h1:/HFWF846XePYL/qKDtcEAFgkiGSkLUTaC59A5F48axM=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0 h1:U6cRxjJS7td8iNriUI2QfEdH+Yj60ytyvpmnmKTw0+8=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0/go.mod h1:SRz5jGyAjtNiWwJ93B1+Ndk1p3oFtQsyLw52UGeyRwc=
+go.opentelemetry.io/collector/extension v0.113.0 h1:Vp/YSL8ZCkJQrP1lf2Bm5yaTvcp6ROO3AnfuSL3GEXM=
+go.opentelemetry.io/collector/extension v0.113.0/go.mod h1:Pwp0TNqdHeER4V1I6H6oCvrto/riiOAqs3737BWCnjw=
+go.opentelemetry.io/collector/extension/auth v0.113.0 h1:4ggRy1vepOabUiCWfU+6M9P/ftXojMUNAvBpeLihYj8=
+go.opentelemetry.io/collector/extension/auth v0.113.0/go.mod h1:VbvAm2YZAqePkWgwn0m0vBaq3aC49CxPVwHmrJ24aeQ=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 h1:Qq4IaB6bMUrf/bWoPZ5ESWywCt+vDi8I/ChYejIEPcc=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0/go.mod h1:BRmo+A7f06u/rhyLauU/Vogk+QRN0y1j2VVVgMGWrfQ=
+go.opentelemetry.io/collector/featuregate v1.19.0 h1:ASea2sU+tdpKI3RxIJC/pufDAfwAmrvcQ4EmTHVu0B0=
+go.opentelemetry.io/collector/featuregate v1.19.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0 h1:VRf4p0VhfuaR+Epy/nMIlu/9t39WU9CUgHVUvpuGxfU=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0/go.mod h1:5aDejksdXh5PdJN/OhpzATGT3kbNL0RMmw2Q0Q6E/o0=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0 h1:vRfn85jicO2F4eOTgsWtzmU/K3E/uZUtM1HEefvvJD8=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0/go.mod h1:sR+6eR+YEJhYZu9StbqzeWcCmHpfBAgX/qjP82HY9Gw=
+go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uFxcm2VO+vhad/eE=
+go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg=
+go.opentelemetry.io/collector/processor v0.113.0 h1:BQI6MsKtiCG9HT/nmiRXTKP6SZFrjFKVfM6pTQfbc0k=
+go.opentelemetry.io/collector/processor v0.113.0/go.mod h1:oX91zMI8ZkoaYSUfUYflHiMiiBJPKtODNBUCrETLLd8=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE=
+go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA=
+go.opentelemetry.io/collector/processor/processortest v0.113.0/go.mod h1:tNg04r5KlpVx774jSC8U/oYst1eb7WIiz+0AjXKJ0Uw=
+go.opentelemetry.io/collector/receiver v0.113.0 h1:vraAbkPy8Pz9x5X39gV+j9t6x23PNsY2aJ6gQMugRbQ=
+go.opentelemetry.io/collector/receiver v0.113.0/go.mod h1:IUa8/lNw8Qh4L5Q3jOeRWKW0ebQPoNcfhytxN5Puq2A=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 h1:uVxuzjGe2t1sbwahSBowVHYnGzpzn8brmfn8z1UHvQg=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0/go.mod h1:khKDkzYJR2x2OPUqGSmoSncdINT9lUE5IThiHPDbqZk=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0 h1:0vOvz3S4Q/KwcNCS9C7zPo0uxD6RSWktG88yGdxfV6g=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0/go.mod h1:sRq5ctm5UE/0Ar562wnCVQ1zbAie/D127D1WbtbEuEc=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ=
@@ -596,8 +604,8 @@ google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/traces_exporter_test.go b/comp/otelcol/otlp/components/exporter/datadogexporter/traces_exporter_test.go
index 6a0278dbc26ca..ea47ac7cab0f5 100644
--- a/comp/otelcol/otlp/components/exporter/datadogexporter/traces_exporter_test.go
+++ b/comp/otelcol/otlp/components/exporter/datadogexporter/traces_exporter_test.go
@@ -94,7 +94,7 @@ func testTraceExporter(enableReceiveResourceSpansV2 bool, t *testing.T) {
traceagent := pkgagent.NewAgent(ctx, tcfg, telemetry.NewNoopCollector(), &ddgostatsd.NoOpClient{}, gzip.NewComponent())
f := NewFactory(testComponent{traceagent}, nil, nil, nil, metricsclient.NewStatsdClientWrapper(&ddgostatsd.NoOpClient{}))
- exporter, err := f.CreateTracesExporter(ctx, params, &cfg)
+ exporter, err := f.CreateTraces(ctx, params, &cfg)
assert.NoError(t, err)
go traceagent.Run()
@@ -136,7 +136,7 @@ func testNewTracesExporter(enableReceiveResourceSpansV2 bool, t *testing.T) {
// The client should have been created correctly
f := NewFactory(testComponent{traceagent}, nil, nil, nil, metricsclient.NewStatsdClientWrapper(&ddgostatsd.NoOpClient{}))
- exp, err := f.CreateTracesExporter(context.Background(), params, cfg)
+ exp, err := f.CreateTraces(context.Background(), params, cfg)
assert.NoError(t, err)
assert.NotNil(t, exp)
}
diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/factory.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/factory.go
index febe51094ebad..24f8352902e6c 100644
--- a/comp/otelcol/otlp/components/exporter/logsagentexporter/factory.go
+++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/factory.go
@@ -79,7 +79,7 @@ func (f *factory) createLogsExporter(
ctx, cancel := context.WithCancel(ctx)
// cancel() runs on shutdown
- return exporterhelper.NewLogsExporter(
+ return exporterhelper.NewLogs(
ctx,
set,
c,
diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/factory_test.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/factory_test.go
index 162d1535c88dd..47b0b9730d65c 100644
--- a/comp/otelcol/otlp/components/exporter/logsagentexporter/factory_test.go
+++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/factory_test.go
@@ -24,6 +24,6 @@ func TestNewLogsExporter(t *testing.T) {
cfg := factory.CreateDefaultConfig()
set := exportertest.NewNopSettings()
- _, err := factory.CreateLogsExporter(context.Background(), set, cfg)
+ _, err := factory.CreateLogs(context.Background(), set, cfg)
assert.NoError(t, err)
}
diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod
index b910a27557d63..1b5051503e8a6 100644
--- a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod
+++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod
@@ -52,9 +52,15 @@ require (
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.21.0
github.com/stormcat24/protodep v0.1.8
github.com/stretchr/testify v1.9.0
- go.opentelemetry.io/collector/component v0.111.0
- go.opentelemetry.io/collector/exporter v0.111.0
- go.opentelemetry.io/collector/pdata v1.17.0
+ go.opentelemetry.io/collector/component v0.113.0
+ go.opentelemetry.io/collector/exporter v0.113.0
+ go.opentelemetry.io/collector/exporter/exportertest v0.113.0
+ go.opentelemetry.io/collector/pdata v1.19.0
+)
+
+require (
+ go.opentelemetry.io/collector/consumer/consumererror v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receivertest v0.113.0 // indirect
)
require (
@@ -126,25 +132,24 @@ require (
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- go.opentelemetry.io/collector/config/configretry v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect
- go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/extension v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect
- go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.111.0 // indirect
- go.opentelemetry.io/collector/receiver v0.111.0 // indirect
- go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/semconv v0.111.0 // indirect
- go.opentelemetry.io/otel v1.30.0 // indirect
- go.opentelemetry.io/otel/metric v1.30.0 // indirect
- go.opentelemetry.io/otel/sdk v1.30.0 // indirect
- go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect
- go.opentelemetry.io/otel/trace v1.30.0 // indirect
+ go.opentelemetry.io/collector/config/configretry v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/pprofile v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.113.0 // indirect
+ go.opentelemetry.io/otel v1.31.0 // indirect
+ go.opentelemetry.io/otel/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/trace v1.31.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
@@ -156,7 +161,7 @@ require (
golang.org/x/text v0.19.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect
google.golang.org/grpc v1.67.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum
index 31ea6a757c39f..130be4640b5e7 100644
--- a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum
+++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum
@@ -272,52 +272,56 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/config/configretry v1.17.0 h1:9GaiNKgUDx5by+A0aHKojw1BilHSK+8wq2LOmnynN00=
-go.opentelemetry.io/collector/config/configretry v1.17.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
-go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
-go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
-go.opentelemetry.io/collector/exporter v0.111.0 h1:NpiP6xXGOmSi59RlB5gGTB+PtCLldVeK3vCQBJPW0sU=
-go.opentelemetry.io/collector/exporter v0.111.0/go.mod h1:FjO80zGWZjqXil8vM1MS8gyxxzZ29WmChTNV2y9xjHo=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 h1:fpIRPzqsaEtbVip/wsU6h/GMGISo7UjiiYV61MOMEpQ=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0/go.mod h1:NGUTQd1fminFnw289fVQFN4dxdyedK4GTTrJUc9gCtw=
-go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8=
-go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 h1:kUJSFjm6IQ6nmcJlfSFPvcEO/XeOP9gJY0Qz9O98DKg=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0/go.mod h1:qQGvl8Kz2W8b7QywtE8GNqWJMDBo47cjoiIXYuE+/zM=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
-go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
-go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
-go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6tq/2qvbMIKlhG3s=
-go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM=
-go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
-go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
-go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
-go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
-go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
-go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
-go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE=
-go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg=
-go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM=
-go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y=
-go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
-go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/config/configretry v1.19.0 h1:DEg8PXpo4ahMYgMzZZUU2cPcDF4vqowZlvimJ/t9InY=
+go.opentelemetry.io/collector/config/configretry v1.19.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/consumer v0.113.0 h1:KJSiK5vSIY9dgPxwKfQ3gOgKtQsqc+7IB7mGhUAL5c8=
+go.opentelemetry.io/collector/consumer v0.113.0/go.mod h1:zHMlXYFaJlZoLCBR6UwWoyXZ/adcO1u2ydqUal3VmYU=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0 h1:Hd2N7n9RKbnKRaVrdw6fPBoQko5zZIgCxwVxkL6SAIE=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0/go.mod h1:o0MAGFdzcr7LFTUQ6iivPPhbVmn2ZVIYm3FPXk2+JUo=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 h1:RftAcQUY5UOfbEK4s16jnORqTx16y9+PxA1lQwt98cQ=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0/go.mod h1:ZuHrQ4pWguh6dw0DgTfcUtdY/T+cnOJJNP6LMbm5Y5A=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0 h1:ua2AjNx3DUA8qElXNkggB4w3VDL/rBKBvryOQkhumH8=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0/go.mod h1:vK8o4ZTZSiG3rVyqxZcCNmT/cvEfx34ig7V65L9+6Rg=
+go.opentelemetry.io/collector/exporter v0.113.0 h1:lDZJ6xfuhyLsT/7lqLhIN/ftA6G+9fuYFtubPFvNDxo=
+go.opentelemetry.io/collector/exporter v0.113.0/go.mod h1:0W4NBf5NjWYxR8oJodmOybgN4O0MLazdJwwHevirvXg=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 h1:8bsk3wYYNr+WAM5nZkFjiLYSTH9MsY2tm7nUpMWt3qc=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0/go.mod h1:/HFWF846XePYL/qKDtcEAFgkiGSkLUTaC59A5F48axM=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0 h1:U6cRxjJS7td8iNriUI2QfEdH+Yj60ytyvpmnmKTw0+8=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0/go.mod h1:SRz5jGyAjtNiWwJ93B1+Ndk1p3oFtQsyLw52UGeyRwc=
+go.opentelemetry.io/collector/extension v0.113.0 h1:Vp/YSL8ZCkJQrP1lf2Bm5yaTvcp6ROO3AnfuSL3GEXM=
+go.opentelemetry.io/collector/extension v0.113.0/go.mod h1:Pwp0TNqdHeER4V1I6H6oCvrto/riiOAqs3737BWCnjw=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 h1:Qq4IaB6bMUrf/bWoPZ5ESWywCt+vDi8I/ChYejIEPcc=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0/go.mod h1:BRmo+A7f06u/rhyLauU/Vogk+QRN0y1j2VVVgMGWrfQ=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0 h1:VRf4p0VhfuaR+Epy/nMIlu/9t39WU9CUgHVUvpuGxfU=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0/go.mod h1:5aDejksdXh5PdJN/OhpzATGT3kbNL0RMmw2Q0Q6E/o0=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0 h1:vRfn85jicO2F4eOTgsWtzmU/K3E/uZUtM1HEefvvJD8=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0/go.mod h1:sR+6eR+YEJhYZu9StbqzeWcCmHpfBAgX/qjP82HY9Gw=
+go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uFxcm2VO+vhad/eE=
+go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg=
+go.opentelemetry.io/collector/receiver v0.113.0 h1:vraAbkPy8Pz9x5X39gV+j9t6x23PNsY2aJ6gQMugRbQ=
+go.opentelemetry.io/collector/receiver v0.113.0/go.mod h1:IUa8/lNw8Qh4L5Q3jOeRWKW0ebQPoNcfhytxN5Puq2A=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 h1:uVxuzjGe2t1sbwahSBowVHYnGzpzn8brmfn8z1UHvQg=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0/go.mod h1:khKDkzYJR2x2OPUqGSmoSncdINT9lUE5IThiHPDbqZk=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0 h1:0vOvz3S4Q/KwcNCS9C7zPo0uxD6RSWktG88yGdxfV6g=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0/go.mod h1:sRq5ctm5UE/0Ar562wnCVQ1zbAie/D127D1WbtbEuEc=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@@ -458,8 +462,8 @@ google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRn
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go
index 839b5a294c1d2..f7ed2dc597281 100644
--- a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go
+++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go
@@ -270,7 +270,7 @@ func TestLogsExporter(t *testing.T) {
LogSourceName: tt.args.logSourceName,
}
ctx := context.Background()
- exp, err := f.CreateLogsExporter(ctx, params, cfg)
+ exp, err := f.CreateLogs(ctx, params, cfg)
require.NoError(t, err)
require.NoError(t, exp.ConsumeLogs(ctx, tt.args.ld))
diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/exporter_test.go b/comp/otelcol/otlp/components/exporter/serializerexporter/exporter_test.go
index 9287cb3ad2f22..a96df0d4762e5 100644
--- a/comp/otelcol/otlp/components/exporter/serializerexporter/exporter_test.go
+++ b/comp/otelcol/otlp/components/exporter/serializerexporter/exporter_test.go
@@ -179,7 +179,7 @@ func Test_ConsumeMetrics_Tags(t *testing.T) {
}, nil, nil)
cfg := f.CreateDefaultConfig().(*ExporterConfig)
cfg.Metrics.Tags = strings.Join(tt.extraTags, ",")
- exp, err := f.CreateMetricsExporter(
+ exp, err := f.CreateMetrics(
ctx,
exportertest.NewNopSettings(),
cfg,
diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/factory.go b/comp/otelcol/otlp/components/exporter/serializerexporter/factory.go
index bc3340b378f76..2dd124bf49182 100644
--- a/comp/otelcol/otlp/components/exporter/serializerexporter/factory.go
+++ b/comp/otelcol/otlp/components/exporter/serializerexporter/factory.go
@@ -74,7 +74,7 @@ func (f *factory) createMetricExporter(ctx context.Context, params exp.Settings,
return nil, err
}
- exporter, err := exporterhelper.NewMetricsExporter(ctx, params, cfg, newExp.ConsumeMetrics,
+ exporter, err := exporterhelper.NewMetrics(ctx, params, cfg, newExp.ConsumeMetrics,
exporterhelper.WithQueue(cfg.QueueConfig),
exporterhelper.WithTimeout(cfg.TimeoutConfig),
// the metrics remapping code mutates data
diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/factory_test.go b/comp/otelcol/otlp/components/exporter/serializerexporter/factory_test.go
index 12a800dded27b..3b61e62fe2695 100644
--- a/comp/otelcol/otlp/components/exporter/serializerexporter/factory_test.go
+++ b/comp/otelcol/otlp/components/exporter/serializerexporter/factory_test.go
@@ -51,7 +51,7 @@ func TestNewMetricsExporter(t *testing.T) {
factory := newFactory()
cfg := factory.CreateDefaultConfig()
set := exportertest.NewNopSettings()
- exp, err := factory.CreateMetricsExporter(context.Background(), set, cfg)
+ exp, err := factory.CreateMetrics(context.Background(), set, cfg)
assert.NoError(t, err)
assert.NotNil(t, exp)
}
@@ -64,7 +64,7 @@ func TestNewMetricsExporterInvalid(t *testing.T) {
expCfg.Metrics.HistConfig.Mode = "InvalidMode"
set := exportertest.NewNopSettings()
- _, err := factory.CreateMetricsExporter(context.Background(), set, cfg)
+ _, err := factory.CreateMetrics(context.Background(), set, cfg)
assert.Error(t, err)
}
@@ -73,7 +73,7 @@ func TestNewTracesExporter(t *testing.T) {
cfg := factory.CreateDefaultConfig()
set := exportertest.NewNopSettings()
- _, err := factory.CreateTracesExporter(context.Background(), set, cfg)
+ _, err := factory.CreateTraces(context.Background(), set, cfg)
assert.Error(t, err)
}
@@ -82,6 +82,6 @@ func TestNewLogsExporter(t *testing.T) {
cfg := factory.CreateDefaultConfig()
set := exportertest.NewNopSettings()
- _, err := factory.CreateLogsExporter(context.Background(), set, cfg)
+ _, err := factory.CreateLogs(context.Background(), set, cfg)
assert.Error(t, err)
}
diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod
index 7f97b96915602..cde989ca9301d 100644
--- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod
+++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod
@@ -76,16 +76,16 @@ require (
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.111.0
github.com/stretchr/testify v1.9.0
github.com/tinylib/msgp v1.1.8
- go.opentelemetry.io/collector/component v0.111.0
- go.opentelemetry.io/collector/config/confignet v0.104.0
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
- go.opentelemetry.io/collector/confmap v1.17.0
- go.opentelemetry.io/collector/consumer v0.111.0
- go.opentelemetry.io/collector/exporter v0.111.0
- go.opentelemetry.io/collector/extension v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata v1.17.0
- go.opentelemetry.io/collector/receiver v0.111.0 // indirect
- go.opentelemetry.io/collector/semconv v0.111.0 // indirect
+ go.opentelemetry.io/collector/component v0.113.0
+ go.opentelemetry.io/collector/config/confignet v1.19.0
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/confmap v1.19.0
+ go.opentelemetry.io/collector/consumer v0.113.0
+ go.opentelemetry.io/collector/exporter v0.113.0
+ go.opentelemetry.io/collector/extension v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata v1.19.0
+ go.opentelemetry.io/collector/receiver v0.113.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.113.0 // indirect
go.uber.org/multierr v1.11.0
)
@@ -152,7 +152,7 @@ require (
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
- github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/uuid v1.6.0 // indirect
@@ -198,21 +198,20 @@ require (
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/twmb/murmur3 v1.1.8 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- go.opentelemetry.io/collector/config/configretry v1.17.0 // indirect
- go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect
- go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect
- go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.111.0 // indirect
- go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect
- go.opentelemetry.io/otel v1.30.0 // indirect
+ go.opentelemetry.io/collector/config/configretry v1.19.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/pprofile v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 // indirect
+ go.opentelemetry.io/otel v1.31.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect
- go.opentelemetry.io/otel/metric v1.30.0 // indirect
- go.opentelemetry.io/otel/sdk v1.30.0 // indirect
- go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect
- go.opentelemetry.io/otel/trace v1.30.0 // indirect
+ go.opentelemetry.io/otel/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/trace v1.31.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/dig v1.18.0 // indirect
go.uber.org/fx v1.22.2 // indirect
@@ -226,7 +225,14 @@ require (
golang.org/x/tools v0.26.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect
google.golang.org/grpc v1.67.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
+
+require go.opentelemetry.io/collector/exporter/exportertest v0.113.0
+
+require (
+ go.opentelemetry.io/collector/consumer/consumererror v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receivertest v0.113.0 // indirect
+)
diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum b/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum
index a9aa54adfab35..3ace4c7d8dde0 100644
--- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum
+++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum
@@ -82,8 +82,8 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
-github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -324,58 +324,62 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/config/confignet v0.104.0 h1:i7AOTJf4EQox3SEt1YtQFQR+BwXr3v5D9x3Ai9/ovy8=
-go.opentelemetry.io/collector/config/confignet v0.104.0/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E=
-go.opentelemetry.io/collector/config/configretry v1.17.0 h1:9GaiNKgUDx5by+A0aHKojw1BilHSK+8wq2LOmnynN00=
-go.opentelemetry.io/collector/config/configretry v1.17.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
-go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM=
-go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k=
-go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
-go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
-go.opentelemetry.io/collector/exporter v0.111.0 h1:NpiP6xXGOmSi59RlB5gGTB+PtCLldVeK3vCQBJPW0sU=
-go.opentelemetry.io/collector/exporter v0.111.0/go.mod h1:FjO80zGWZjqXil8vM1MS8gyxxzZ29WmChTNV2y9xjHo=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 h1:fpIRPzqsaEtbVip/wsU6h/GMGISo7UjiiYV61MOMEpQ=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0/go.mod h1:NGUTQd1fminFnw289fVQFN4dxdyedK4GTTrJUc9gCtw=
-go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8=
-go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 h1:kUJSFjm6IQ6nmcJlfSFPvcEO/XeOP9gJY0Qz9O98DKg=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0/go.mod h1:qQGvl8Kz2W8b7QywtE8GNqWJMDBo47cjoiIXYuE+/zM=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
-go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
-go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
-go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6tq/2qvbMIKlhG3s=
-go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM=
-go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
-go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
-go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
-go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/config/confignet v1.19.0 h1:gEDTd8zLx4pPpG5///XPRpbYUpvKsuQzDdM5IEULY9w=
+go.opentelemetry.io/collector/config/confignet v1.19.0/go.mod h1:o3v4joAEjvLwntqexg5ixMqRrU1+Vst+jWuCUaBNgOg=
+go.opentelemetry.io/collector/config/configretry v1.19.0 h1:DEg8PXpo4ahMYgMzZZUU2cPcDF4vqowZlvimJ/t9InY=
+go.opentelemetry.io/collector/config/configretry v1.19.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/confmap v1.19.0 h1:TQ0lZpAKqgsE0EKk+u4JA+uBbPYeFRmWP3GH43w40CY=
+go.opentelemetry.io/collector/confmap v1.19.0/go.mod h1:GgNu1ElPGmLn9govqIfjaopvdspw4PJ9KeDtWC4E2Q4=
+go.opentelemetry.io/collector/consumer v0.113.0 h1:KJSiK5vSIY9dgPxwKfQ3gOgKtQsqc+7IB7mGhUAL5c8=
+go.opentelemetry.io/collector/consumer v0.113.0/go.mod h1:zHMlXYFaJlZoLCBR6UwWoyXZ/adcO1u2ydqUal3VmYU=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0 h1:Hd2N7n9RKbnKRaVrdw6fPBoQko5zZIgCxwVxkL6SAIE=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0/go.mod h1:o0MAGFdzcr7LFTUQ6iivPPhbVmn2ZVIYm3FPXk2+JUo=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 h1:RftAcQUY5UOfbEK4s16jnORqTx16y9+PxA1lQwt98cQ=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0/go.mod h1:ZuHrQ4pWguh6dw0DgTfcUtdY/T+cnOJJNP6LMbm5Y5A=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0 h1:ua2AjNx3DUA8qElXNkggB4w3VDL/rBKBvryOQkhumH8=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0/go.mod h1:vK8o4ZTZSiG3rVyqxZcCNmT/cvEfx34ig7V65L9+6Rg=
+go.opentelemetry.io/collector/exporter v0.113.0 h1:lDZJ6xfuhyLsT/7lqLhIN/ftA6G+9fuYFtubPFvNDxo=
+go.opentelemetry.io/collector/exporter v0.113.0/go.mod h1:0W4NBf5NjWYxR8oJodmOybgN4O0MLazdJwwHevirvXg=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 h1:8bsk3wYYNr+WAM5nZkFjiLYSTH9MsY2tm7nUpMWt3qc=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0/go.mod h1:/HFWF846XePYL/qKDtcEAFgkiGSkLUTaC59A5F48axM=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0 h1:U6cRxjJS7td8iNriUI2QfEdH+Yj60ytyvpmnmKTw0+8=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0/go.mod h1:SRz5jGyAjtNiWwJ93B1+Ndk1p3oFtQsyLw52UGeyRwc=
+go.opentelemetry.io/collector/extension v0.113.0 h1:Vp/YSL8ZCkJQrP1lf2Bm5yaTvcp6ROO3AnfuSL3GEXM=
+go.opentelemetry.io/collector/extension v0.113.0/go.mod h1:Pwp0TNqdHeER4V1I6H6oCvrto/riiOAqs3737BWCnjw=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 h1:Qq4IaB6bMUrf/bWoPZ5ESWywCt+vDi8I/ChYejIEPcc=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0/go.mod h1:BRmo+A7f06u/rhyLauU/Vogk+QRN0y1j2VVVgMGWrfQ=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0 h1:VRf4p0VhfuaR+Epy/nMIlu/9t39WU9CUgHVUvpuGxfU=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0/go.mod h1:5aDejksdXh5PdJN/OhpzATGT3kbNL0RMmw2Q0Q6E/o0=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0 h1:vRfn85jicO2F4eOTgsWtzmU/K3E/uZUtM1HEefvvJD8=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0/go.mod h1:sR+6eR+YEJhYZu9StbqzeWcCmHpfBAgX/qjP82HY9Gw=
+go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uFxcm2VO+vhad/eE=
+go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg=
+go.opentelemetry.io/collector/receiver v0.113.0 h1:vraAbkPy8Pz9x5X39gV+j9t6x23PNsY2aJ6gQMugRbQ=
+go.opentelemetry.io/collector/receiver v0.113.0/go.mod h1:IUa8/lNw8Qh4L5Q3jOeRWKW0ebQPoNcfhytxN5Puq2A=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 h1:uVxuzjGe2t1sbwahSBowVHYnGzpzn8brmfn8z1UHvQg=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0/go.mod h1:khKDkzYJR2x2OPUqGSmoSncdINT9lUE5IThiHPDbqZk=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0 h1:0vOvz3S4Q/KwcNCS9C7zPo0uxD6RSWktG88yGdxfV6g=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0/go.mod h1:sRq5ctm5UE/0Ar562wnCVQ1zbAie/D127D1WbtbEuEc=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ=
go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM=
-go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
-go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
-go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE=
-go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg=
-go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM=
-go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y=
-go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
-go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@@ -521,8 +525,8 @@ google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFN
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/common.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/common.go
new file mode 100644
index 0000000000000..44e2ff6be69a0
--- /dev/null
+++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/common.go
@@ -0,0 +1,135 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+package infraattributesprocessor
+
+import (
+ "fmt"
+ "go.uber.org/zap"
+ "strings"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ conventions "go.opentelemetry.io/collector/semconv/v1.21.0"
+
+ "github.com/DataDog/datadog-agent/comp/core/tagger/tags"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/types"
+)
+
+var unifiedServiceTagMap = map[string][]string{
+ tags.Service: {conventions.AttributeServiceName},
+ tags.Env: {conventions.AttributeDeploymentEnvironment, "deployment.environment.name"},
+ tags.Version: {conventions.AttributeServiceVersion},
+}
+
+// GenerateKubeMetadataEntityID is a function that generates an entity ID for a Kubernetes resource.
+type GenerateKubeMetadataEntityID func(group, resource, namespace, name string) string
+
+// processInfraTags collects entities/tags from resourceAttributes and adds infra tags to resourceAttributes
+func processInfraTags(
+ logger *zap.Logger,
+ tagger taggerClient,
+ cardinality types.TagCardinality,
+ generateID GenerateKubeMetadataEntityID,
+ resourceAttributes pcommon.Map,
+) {
+ entityIDs := entityIDsFromAttributes(resourceAttributes, generateID)
+ tagMap := make(map[string]string)
+
+ // Get all unique tags from resource attributes and global tags
+ for _, entityID := range entityIDs {
+ entityTags, err := tagger.Tag(entityID, cardinality)
+ if err != nil {
+ logger.Error("Cannot get tags for entity", zap.String("entityID", entityID.String()), zap.Error(err))
+ continue
+ }
+ for _, tag := range entityTags {
+ k, v := splitTag(tag)
+ _, hasTag := tagMap[k]
+ if k != "" && v != "" && !hasTag {
+ tagMap[k] = v
+ }
+ }
+ }
+ globalTags, err := tagger.GlobalTags(cardinality)
+ if err != nil {
+ logger.Error("Cannot get global tags", zap.Error(err))
+ }
+ for _, tag := range globalTags {
+ k, v := splitTag(tag)
+ _, hasTag := tagMap[k]
+ if k != "" && v != "" && !hasTag {
+ tagMap[k] = v
+ }
+ }
+
+ // Add all tags as resource attributes
+ for k, v := range tagMap {
+ otelAttrs, ust := unifiedServiceTagMap[k]
+ if !ust {
+ resourceAttributes.PutStr(k, v)
+ continue
+ }
+
+ // Add OTel semantics for unified service tags which are required in mapping
+ hasOTelAttr := false
+ for _, otelAttr := range otelAttrs {
+ if _, ok := resourceAttributes.Get(otelAttr); ok {
+ hasOTelAttr = true
+ break
+ }
+ }
+ if !hasOTelAttr {
+ resourceAttributes.PutStr(otelAttrs[0], v)
+ }
+ }
+}
+
+// TODO: Replace OriginIDFromAttributes in opentelemetry-mapping-go with this method
+// entityIDsFromAttributes gets the entity IDs from resource attributes.
+// If not found, an empty string slice is returned.
+func entityIDsFromAttributes(attrs pcommon.Map, generateID GenerateKubeMetadataEntityID) []types.EntityID {
+ entityIDs := make([]types.EntityID, 0, 8)
+ // Prefixes come from pkg/util/kubernetes/kubelet and pkg/util/containers.
+ if containerID, ok := attrs.Get(conventions.AttributeContainerID); ok {
+ entityIDs = append(entityIDs, types.NewEntityID(types.ContainerID, containerID.AsString()))
+ }
+ if containerImageID, ok := attrs.Get(conventions.AttributeContainerImageID); ok {
+ splitImageID := strings.SplitN(containerImageID.AsString(), "@sha256:", 2)
+ if len(splitImageID) == 2 {
+ entityIDs = append(entityIDs, types.NewEntityID(types.ContainerImageMetadata, fmt.Sprintf("sha256:%v", splitImageID[1])))
+ }
+ }
+ if ecsTaskArn, ok := attrs.Get(conventions.AttributeAWSECSTaskARN); ok {
+ entityIDs = append(entityIDs, types.NewEntityID(types.ECSTask, ecsTaskArn.AsString()))
+ }
+ if deploymentName, ok := attrs.Get(conventions.AttributeK8SDeploymentName); ok {
+ namespace, namespaceOk := attrs.Get(conventions.AttributeK8SNamespaceName)
+ if namespaceOk {
+ entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesDeployment, fmt.Sprintf("%s/%s", namespace.AsString(), deploymentName.AsString())))
+ }
+ }
+ if namespace, ok := attrs.Get(conventions.AttributeK8SNamespaceName); ok {
+ entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, generateID("", "namespaces", "", namespace.AsString())))
+ }
+
+ if nodeName, ok := attrs.Get(conventions.AttributeK8SNodeName); ok {
+ entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, generateID("", "nodes", "", nodeName.AsString())))
+ }
+ if podUID, ok := attrs.Get(conventions.AttributeK8SPodUID); ok {
+ entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesPodUID, podUID.AsString()))
+ }
+ if processPid, ok := attrs.Get(conventions.AttributeProcessPID); ok {
+ entityIDs = append(entityIDs, types.NewEntityID(types.Process, processPid.AsString()))
+ }
+ return entityIDs
+}
+
+func splitTag(tag string) (key string, value string) {
+ split := strings.SplitN(tag, ":", 2)
+ if len(split) < 2 || split[0] == "" || split[1] == "" {
+ return "", ""
+ }
+ return split[0], split[1]
+}
diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod
index ebb4c9bcd89cc..9426ba98eff3e 100644
--- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod
+++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod
@@ -8,6 +8,7 @@ replace (
github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../../../core/flare/types
github.com/DataDog/datadog-agent/comp/core/secrets => ../../../../../core/secrets
github.com/DataDog/datadog-agent/comp/core/tagger/common => ../../../../../core/tagger/common
+ github.com/DataDog/datadog-agent/comp/core/tagger/tags => ../../../../../core/tagger/tags
github.com/DataDog/datadog-agent/comp/core/tagger/types => ../../../../../core/tagger/types
github.com/DataDog/datadog-agent/comp/core/tagger/utils => ../../../../../core/tagger/utils
github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../../../core/telemetry
@@ -33,17 +34,19 @@ replace (
)
require (
+ github.com/DataDog/datadog-agent/comp/core/tagger/tags v0.0.0-00010101000000-000000000000
github.com/DataDog/datadog-agent/comp/core/tagger/types v0.59.0
github.com/stretchr/testify v1.9.0
- go.opentelemetry.io/collector/component v0.111.0
- go.opentelemetry.io/collector/confmap v1.17.0
- go.opentelemetry.io/collector/consumer v0.111.0
- go.opentelemetry.io/collector/consumer/consumertest v0.111.0
- go.opentelemetry.io/collector/pdata v1.17.0
- go.opentelemetry.io/collector/processor v0.111.0
- go.opentelemetry.io/collector/semconv v0.111.0
- go.opentelemetry.io/otel/metric v1.30.0
- go.opentelemetry.io/otel/trace v1.30.0
+ go.opentelemetry.io/collector/component v0.113.0
+ go.opentelemetry.io/collector/confmap v1.19.0
+ go.opentelemetry.io/collector/consumer v0.113.0
+ go.opentelemetry.io/collector/consumer/consumertest v0.113.0
+ go.opentelemetry.io/collector/pdata v1.19.0
+ go.opentelemetry.io/collector/processor v0.113.0
+ go.opentelemetry.io/collector/processor/processortest v0.113.0
+ go.opentelemetry.io/collector/semconv v0.113.0
+ go.opentelemetry.io/otel/metric v1.31.0
+ go.opentelemetry.io/otel/trace v1.31.0
go.uber.org/zap v1.27.0
)
@@ -52,7 +55,7 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
@@ -65,23 +68,22 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
- go.opentelemetry.io/collector/component/componentstatus v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.111.0 // indirect
- go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect
- go.opentelemetry.io/otel v1.30.0 // indirect
- go.opentelemetry.io/otel/sdk v1.30.0 // indirect
- go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect
+ go.opentelemetry.io/collector/component/componentstatus v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/pprofile v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/testdata v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 // indirect
+ go.opentelemetry.io/otel v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/text v0.19.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect
google.golang.org/grpc v1.67.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum
index 60a28c5c256be..b6c0a0726eaad 100644
--- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum
+++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum
@@ -6,8 +6,8 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
-github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
@@ -48,46 +48,46 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
-go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM=
-go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k=
-go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
-go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
-go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
-go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
-go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g=
-go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ=
-go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
-go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
-go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
-go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
-go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
-go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
-go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE=
-go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg=
-go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM=
-go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y=
-go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
-go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0 h1:iNlUi+vDo7OqQwg4UOjfqqY3Xram4SkdcSZAyxt6QfE=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0/go.mod h1:NF/hVnP0kcsEqV9x14yMxN9GoehuOaVgMpO+s2KE+28=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/confmap v1.19.0 h1:TQ0lZpAKqgsE0EKk+u4JA+uBbPYeFRmWP3GH43w40CY=
+go.opentelemetry.io/collector/confmap v1.19.0/go.mod h1:GgNu1ElPGmLn9govqIfjaopvdspw4PJ9KeDtWC4E2Q4=
+go.opentelemetry.io/collector/consumer v0.113.0 h1:KJSiK5vSIY9dgPxwKfQ3gOgKtQsqc+7IB7mGhUAL5c8=
+go.opentelemetry.io/collector/consumer v0.113.0/go.mod h1:zHMlXYFaJlZoLCBR6UwWoyXZ/adcO1u2ydqUal3VmYU=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 h1:RftAcQUY5UOfbEK4s16jnORqTx16y9+PxA1lQwt98cQ=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0/go.mod h1:ZuHrQ4pWguh6dw0DgTfcUtdY/T+cnOJJNP6LMbm5Y5A=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0 h1:ua2AjNx3DUA8qElXNkggB4w3VDL/rBKBvryOQkhumH8=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0/go.mod h1:vK8o4ZTZSiG3rVyqxZcCNmT/cvEfx34ig7V65L9+6Rg=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0 h1:VRf4p0VhfuaR+Epy/nMIlu/9t39WU9CUgHVUvpuGxfU=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0/go.mod h1:5aDejksdXh5PdJN/OhpzATGT3kbNL0RMmw2Q0Q6E/o0=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0 h1:vRfn85jicO2F4eOTgsWtzmU/K3E/uZUtM1HEefvvJD8=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0/go.mod h1:sR+6eR+YEJhYZu9StbqzeWcCmHpfBAgX/qjP82HY9Gw=
+go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uFxcm2VO+vhad/eE=
+go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg=
+go.opentelemetry.io/collector/processor v0.113.0 h1:BQI6MsKtiCG9HT/nmiRXTKP6SZFrjFKVfM6pTQfbc0k=
+go.opentelemetry.io/collector/processor v0.113.0/go.mod h1:oX91zMI8ZkoaYSUfUYflHiMiiBJPKtODNBUCrETLLd8=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE=
+go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA=
+go.opentelemetry.io/collector/processor/processortest v0.113.0/go.mod h1:tNg04r5KlpVx774jSC8U/oYst1eb7WIiz+0AjXKJ0Uw=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -129,8 +129,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:
google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go
index 0ff72e0b56605..ba66a8d4b8118 100644
--- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go
+++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go
@@ -38,39 +38,7 @@ func (ialp *infraAttributesLogProcessor) processLogs(_ context.Context, ld plog.
rls := ld.ResourceLogs()
for i := 0; i < rls.Len(); i++ {
resourceAttributes := rls.At(i).Resource().Attributes()
- entityIDs := entityIDsFromAttributes(resourceAttributes, ialp.generateID)
- tagMap := make(map[string]string)
-
- // Get all unique tags from resource attributes and global tags
- for _, entityID := range entityIDs {
- entityTags, err := ialp.tagger.Tag(entityID, ialp.cardinality)
- if err != nil {
- ialp.logger.Error("Cannot get tags for entity", zap.String("entityID", entityID.String()), zap.Error(err))
- continue
- }
- for _, tag := range entityTags {
- k, v := splitTag(tag)
- _, hasTag := tagMap[k]
- if k != "" && v != "" && !hasTag {
- tagMap[k] = v
- }
- }
- }
- globalTags, err := ialp.tagger.GlobalTags(ialp.cardinality)
- if err != nil {
- ialp.logger.Error("Cannot get global tags", zap.Error(err))
- }
- for _, tag := range globalTags {
- k, v := splitTag(tag)
- _, hasTag := tagMap[k]
- if k != "" && v != "" && !hasTag {
- tagMap[k] = v
- }
- }
- // Add all tags as resource attributes
- for k, v := range tagMap {
- resourceAttributes.PutStr(k, v)
- }
+ processInfraTags(ialp.logger, ialp.tagger, ialp.cardinality, ialp.generateID, resourceAttributes)
}
return ld, nil
}
diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go
index 8b7f5f14d8e49..481c9ad9c090b 100644
--- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go
+++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go
@@ -7,15 +7,10 @@ package infraattributesprocessor
import (
"context"
- "fmt"
- "strings"
-
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
- "go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/processor"
- conventions "go.opentelemetry.io/collector/semconv/v1.21.0"
"go.uber.org/zap"
)
@@ -37,95 +32,11 @@ func newInfraAttributesMetricProcessor(set processor.Settings, cfg *Config, tagg
return iamp, nil
}
-// GenerateKubeMetadataEntityID is a function that generates an entity ID for a Kubernetes resource.
-type GenerateKubeMetadataEntityID func(group, resource, namespace, name string) string
-
-// TODO: Replace OriginIDFromAttributes in opentelemetry-mapping-go with this method
-// entityIDsFromAttributes gets the entity IDs from resource attributes.
-// If not found, an empty string slice is returned.
-func entityIDsFromAttributes(attrs pcommon.Map, generateID GenerateKubeMetadataEntityID) []types.EntityID {
- entityIDs := make([]types.EntityID, 0, 8)
- // Prefixes come from pkg/util/kubernetes/kubelet and pkg/util/containers.
- if containerID, ok := attrs.Get(conventions.AttributeContainerID); ok {
- entityIDs = append(entityIDs, types.NewEntityID(types.ContainerID, containerID.AsString()))
- }
- if containerImageID, ok := attrs.Get(conventions.AttributeContainerImageID); ok {
- splitImageID := strings.SplitN(containerImageID.AsString(), "@sha256:", 2)
- if len(splitImageID) == 2 {
- entityIDs = append(entityIDs, types.NewEntityID(types.ContainerImageMetadata, fmt.Sprintf("sha256:%v", splitImageID[1])))
- }
- }
- if ecsTaskArn, ok := attrs.Get(conventions.AttributeAWSECSTaskARN); ok {
- entityIDs = append(entityIDs, types.NewEntityID(types.ECSTask, ecsTaskArn.AsString()))
- }
- if deploymentName, ok := attrs.Get(conventions.AttributeK8SDeploymentName); ok {
- namespace, namespaceOk := attrs.Get(conventions.AttributeK8SNamespaceName)
- if namespaceOk {
- entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesDeployment, fmt.Sprintf("%s/%s", namespace.AsString(), deploymentName.AsString())))
- }
- }
- if namespace, ok := attrs.Get(conventions.AttributeK8SNamespaceName); ok {
- entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, generateID("", "namespaces", "", namespace.AsString())))
- }
-
- if nodeName, ok := attrs.Get(conventions.AttributeK8SNodeName); ok {
- entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, generateID("", "nodes", "", nodeName.AsString())))
- }
- if podUID, ok := attrs.Get(conventions.AttributeK8SPodUID); ok {
- entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesPodUID, podUID.AsString()))
- }
- if processPid, ok := attrs.Get(conventions.AttributeProcessPID); ok {
- entityIDs = append(entityIDs, types.NewEntityID(types.Process, processPid.AsString()))
- }
- return entityIDs
-}
-
-func splitTag(tag string) (key string, value string) {
- split := strings.SplitN(tag, ":", 2)
- if len(split) < 2 || split[0] == "" || split[1] == "" {
- return "", ""
- }
- return split[0], split[1]
-}
-
func (iamp *infraAttributesMetricProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) {
rms := md.ResourceMetrics()
for i := 0; i < rms.Len(); i++ {
resourceAttributes := rms.At(i).Resource().Attributes()
- entityIDs := entityIDsFromAttributes(resourceAttributes, iamp.generateID)
- tagMap := make(map[string]string)
-
- // Get all unique tags from resource attributes and global tags
- for _, entityID := range entityIDs {
- entityTags, err := iamp.tagger.Tag(entityID, iamp.cardinality)
- if err != nil {
- iamp.logger.Error("Cannot get tags for entity", zap.String("entityID", entityID.String()), zap.Error(err))
- continue
- }
- for _, tag := range entityTags {
- k, v := splitTag(tag)
- _, hasTag := tagMap[k]
- if k != "" && v != "" && !hasTag {
- tagMap[k] = v
- }
- }
- }
- globalTags, err := iamp.tagger.GlobalTags(iamp.cardinality)
- if err != nil {
- iamp.logger.Error("Cannot get global tags", zap.Error(err))
- }
- for _, tag := range globalTags {
- k, v := splitTag(tag)
- _, hasTag := tagMap[k]
- if k != "" && v != "" && !hasTag {
- tagMap[k] = v
- }
- }
-
- // Add all tags as resource attributes
- for k, v := range tagMap {
- resourceAttributes.PutStr(k, v)
- }
+ processInfraTags(iamp.logger, iamp.tagger, iamp.cardinality, iamp.generateID, resourceAttributes)
}
return md, nil
}
diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go
index eca0363e4ab9d..f758e84556cb4 100644
--- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go
+++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go
@@ -37,39 +37,7 @@ func (iasp *infraAttributesSpanProcessor) processTraces(_ context.Context, td pt
rss := td.ResourceSpans()
for i := 0; i < rss.Len(); i++ {
resourceAttributes := rss.At(i).Resource().Attributes()
- entityIDs := entityIDsFromAttributes(resourceAttributes, iasp.generateID)
- tagMap := make(map[string]string)
-
- // Get all unique tags from resource attributes and global tags
- for _, entityID := range entityIDs {
- entityTags, err := iasp.tagger.Tag(entityID, iasp.cardinality)
- if err != nil {
- iasp.logger.Error("Cannot get tags for entity", zap.String("entityID", entityID.String()), zap.Error(err))
- continue
- }
- for _, tag := range entityTags {
- k, v := splitTag(tag)
- _, hasTag := tagMap[k]
- if k != "" && v != "" && !hasTag {
- tagMap[k] = v
- }
- }
- }
- globalTags, err := iasp.tagger.GlobalTags(iasp.cardinality)
- if err != nil {
- iasp.logger.Error("Cannot get global tags", zap.Error(err))
- }
- for _, tag := range globalTags {
- k, v := splitTag(tag)
- _, hasTag := tagMap[k]
- if k != "" && v != "" && !hasTag {
- tagMap[k] = v
- }
- }
- // Add all tags as resource attributes
- for k, v := range tagMap {
- resourceAttributes.PutStr(k, v)
- }
+ processInfraTags(iasp.logger, iasp.tagger, iasp.cardinality, iasp.generateID, resourceAttributes)
}
return td, nil
}
diff --git a/comp/otelcol/otlp/components/statsprocessor/go.mod b/comp/otelcol/otlp/components/statsprocessor/go.mod
index b955e82f62a03..83515cafb48e7 100644
--- a/comp/otelcol/otlp/components/statsprocessor/go.mod
+++ b/comp/otelcol/otlp/components/statsprocessor/go.mod
@@ -25,8 +25,8 @@ require (
github.com/DataDog/datadog-go/v5 v5.5.0
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0
github.com/stretchr/testify v1.9.0
- go.opentelemetry.io/collector/component v0.111.0
- go.opentelemetry.io/collector/pdata v1.17.0
+ go.opentelemetry.io/collector/component v0.113.0
+ go.opentelemetry.io/collector/pdata v1.19.0
go.opentelemetry.io/otel/sdk/metric v1.31.0
)
@@ -75,8 +75,8 @@ require (
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
- go.opentelemetry.io/collector/semconv v0.111.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.113.0 // indirect
go.opentelemetry.io/otel v1.31.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.31.0 // indirect
@@ -93,7 +93,7 @@ require (
golang.org/x/tools v0.26.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/grpc v1.67.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/comp/otelcol/otlp/components/statsprocessor/go.sum b/comp/otelcol/otlp/components/statsprocessor/go.sum
index bea707d5be0ce..ba8958d6623f2 100644
--- a/comp/otelcol/otlp/components/statsprocessor/go.sum
+++ b/comp/otelcol/otlp/components/statsprocessor/go.sum
@@ -134,34 +134,34 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
-go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
-go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
-go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
-go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
-go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g=
-go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ=
-go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
-go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0 h1:iNlUi+vDo7OqQwg4UOjfqqY3Xram4SkdcSZAyxt6QfE=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0/go.mod h1:NF/hVnP0kcsEqV9x14yMxN9GoehuOaVgMpO+s2KE+28=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/consumer v0.113.0 h1:KJSiK5vSIY9dgPxwKfQ3gOgKtQsqc+7IB7mGhUAL5c8=
+go.opentelemetry.io/collector/consumer v0.113.0/go.mod h1:zHMlXYFaJlZoLCBR6UwWoyXZ/adcO1u2ydqUal3VmYU=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 h1:RftAcQUY5UOfbEK4s16jnORqTx16y9+PxA1lQwt98cQ=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0/go.mod h1:ZuHrQ4pWguh6dw0DgTfcUtdY/T+cnOJJNP6LMbm5Y5A=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0 h1:ua2AjNx3DUA8qElXNkggB4w3VDL/rBKBvryOQkhumH8=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0/go.mod h1:vK8o4ZTZSiG3rVyqxZcCNmT/cvEfx34ig7V65L9+6Rg=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0 h1:VRf4p0VhfuaR+Epy/nMIlu/9t39WU9CUgHVUvpuGxfU=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0/go.mod h1:5aDejksdXh5PdJN/OhpzATGT3kbNL0RMmw2Q0Q6E/o0=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0 h1:vRfn85jicO2F4eOTgsWtzmU/K3E/uZUtM1HEefvvJD8=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0/go.mod h1:sR+6eR+YEJhYZu9StbqzeWcCmHpfBAgX/qjP82HY9Gw=
+go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uFxcm2VO+vhad/eE=
+go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg=
+go.opentelemetry.io/collector/processor v0.113.0 h1:BQI6MsKtiCG9HT/nmiRXTKP6SZFrjFKVfM6pTQfbc0k=
+go.opentelemetry.io/collector/processor v0.113.0/go.mod h1:oX91zMI8ZkoaYSUfUYflHiMiiBJPKtODNBUCrETLLd8=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE=
+go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA=
+go.opentelemetry.io/collector/processor/processortest v0.113.0/go.mod h1:tNg04r5KlpVx774jSC8U/oYst1eb7WIiz+0AjXKJ0Uw=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
@@ -262,8 +262,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/comp/otelcol/otlp/integrationtest/integration_test.go b/comp/otelcol/otlp/integrationtest/integration_test.go
index 47b9fe0eeaff9..868b0b211fd74 100644
--- a/comp/otelcol/otlp/integrationtest/integration_test.go
+++ b/comp/otelcol/otlp/integrationtest/integration_test.go
@@ -43,8 +43,8 @@ import (
logtrace "github.com/DataDog/datadog-agent/comp/core/log/fx-trace"
"github.com/DataDog/datadog-agent/comp/core/secrets"
"github.com/DataDog/datadog-agent/comp/core/sysprobeconfig"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ taggerfx "github.com/DataDog/datadog-agent/comp/core/tagger/fx"
"github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx"
@@ -135,9 +135,7 @@ func runTestOTelAgent(ctx context.Context, params *subcommands.GlobalParams) err
orchestratorimpl.MockModule(),
fx.Invoke(func(_ collectordef.Component, _ defaultforwarder.Forwarder, _ optional.Option[logsagentpipeline.Component]) {
}),
-
- fx.Provide(tagger.NewTaggerParams),
- taggerimpl.Module(),
+ taggerfx.Module(tagger.Params{}),
noopsimpl.Module(),
fx.Provide(func(cfg traceconfig.Component) telemetry.TelemetryCollector {
return telemetry.NewCollector(cfg.Object())
diff --git a/comp/otelcol/otlp/map_provider_not_serverless_test.go b/comp/otelcol/otlp/map_provider_not_serverless_test.go
index 7992969678763..e82b30076f003 100644
--- a/comp/otelcol/otlp/map_provider_not_serverless_test.go
+++ b/comp/otelcol/otlp/map_provider_not_serverless_test.go
@@ -17,7 +17,7 @@ import (
"go.opentelemetry.io/collector/confmap"
"go.opentelemetry.io/collector/otelcol"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/otelcol/otlp/internal/configutils"
"github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil"
"github.com/DataDog/datadog-agent/pkg/logs/message"
@@ -1106,7 +1106,7 @@ func TestUnmarshal(t *testing.T) {
provider, err := otelcol.NewConfigProvider(mapSettings)
require.NoError(t, err)
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
components, err := getComponents(serializermock.NewMetricSerializer(t), make(chan *message.Message), fakeTagger)
require.NoError(t, err)
diff --git a/comp/otelcol/otlp/testutil/go.mod b/comp/otelcol/otlp/testutil/go.mod
index 3644428d34f52..eba9d1825808c 100644
--- a/comp/otelcol/otlp/testutil/go.mod
+++ b/comp/otelcol/otlp/testutil/go.mod
@@ -41,8 +41,8 @@ require (
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0
github.com/DataDog/sketches-go v1.4.6
github.com/stretchr/testify v1.9.0
- go.opentelemetry.io/collector/pdata v1.9.0
- google.golang.org/protobuf v1.34.1
+ go.opentelemetry.io/collector/pdata v1.19.0
+ google.golang.org/protobuf v1.35.1
)
require (
@@ -63,7 +63,6 @@ require (
github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.1 // indirect
github.com/DataDog/viper v1.13.5 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
- github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
@@ -104,8 +103,8 @@ require (
golang.org/x/net v0.30.0 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/text v0.19.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect
- google.golang.org/grpc v1.64.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
+ google.golang.org/grpc v1.67.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/comp/otelcol/otlp/testutil/go.sum b/comp/otelcol/otlp/testutil/go.sum
index 9ac2f18af81dc..ca5eb780b40b1 100644
--- a/comp/otelcol/otlp/testutil/go.sum
+++ b/comp/otelcol/otlp/testutil/go.sum
@@ -22,6 +22,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -249,8 +250,8 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.opentelemetry.io/collector/pdata v1.9.0 h1:qyXe3HEVYYxerIYu0rzgo1Tx2d1Zs6iF+TCckbHLFOw=
-go.opentelemetry.io/collector/pdata v1.9.0/go.mod h1:vk7LrfpyVpGZrRWcpjyy0DDZzL3SZiYMQxfap25551w=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
@@ -381,22 +382,22 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 h1:Q2RxlXqh1cgzzUgV261vBO2jI5R/3DD1J2pM0nI4NhU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
-google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
+google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/comp/process/agent/agentimpl/agent_linux_test.go b/comp/process/agent/agentimpl/agent_linux_test.go
index ca4849fedb5dd..bbfcf36a66081 100644
--- a/comp/process/agent/agentimpl/agent_linux_test.go
+++ b/comp/process/agent/agentimpl/agent_linux_test.go
@@ -17,7 +17,7 @@ import (
"go.uber.org/fx"
configComp "github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/core/telemetry"
"github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
"github.com/DataDog/datadog-agent/comp/dogstatsd/statsd"
@@ -127,7 +127,7 @@ func TestProcessAgentComponentOnLinux(t *testing.T) {
runnerimpl.Module(),
hostinfoimpl.MockModule(),
submitterimpl.MockModule(),
- taggerimpl.MockModule(),
+ taggerMock.Module(),
statsd.MockModule(),
Module(),
@@ -190,7 +190,7 @@ func TestStatusProvider(t *testing.T) {
runnerimpl.Module(),
hostinfoimpl.MockModule(),
submitterimpl.MockModule(),
- taggerimpl.MockModule(),
+ taggerMock.Module(),
statsd.MockModule(),
Module(),
fx.Replace(configComp.MockParams{Overrides: map[string]interface{}{
@@ -234,7 +234,7 @@ func TestTelemetryCoreAgent(t *testing.T) {
runnerimpl.Module(),
hostinfoimpl.MockModule(),
submitterimpl.MockModule(),
- taggerimpl.MockModule(),
+ taggerMock.Module(),
statsd.MockModule(),
Module(),
fx.Replace(configComp.MockParams{Overrides: map[string]interface{}{
diff --git a/comp/process/agent/agentimpl/agent_test.go b/comp/process/agent/agentimpl/agent_test.go
index 0c06f5c7ef55e..5f36175977222 100644
--- a/comp/process/agent/agentimpl/agent_test.go
+++ b/comp/process/agent/agentimpl/agent_test.go
@@ -13,7 +13,7 @@ import (
"github.com/stretchr/testify/assert"
"go.uber.org/fx"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggermock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/dogstatsd/statsd"
"github.com/DataDog/datadog-agent/comp/process/agent"
"github.com/DataDog/datadog-agent/comp/process/hostinfo/hostinfoimpl"
@@ -63,7 +63,7 @@ func TestProcessAgentComponent(t *testing.T) {
runnerimpl.Module(),
hostinfoimpl.MockModule(),
submitterimpl.MockModule(),
- taggerimpl.MockModule(),
+ taggermock.Module(),
statsd.MockModule(),
Module(),
}
diff --git a/comp/process/apiserver/apiserver_test.go b/comp/process/apiserver/apiserver_test.go
index 68b2eb22dbbc4..c9b0f4b96e92d 100644
--- a/comp/process/apiserver/apiserver_test.go
+++ b/comp/process/apiserver/apiserver_test.go
@@ -17,8 +17,8 @@ import (
"github.com/DataDog/datadog-agent/comp/core/settings/settingsimpl"
"github.com/DataDog/datadog-agent/comp/core/status"
"github.com/DataDog/datadog-agent/comp/core/status/statusimpl"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ taggerfx "github.com/DataDog/datadog-agent/comp/core/tagger/fx"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx"
"github.com/DataDog/datadog-agent/pkg/util/fxutil"
@@ -34,8 +34,9 @@ func TestLifecycle(t *testing.T) {
PythonVersionGetFunc: func() string { return "n/a" },
},
),
- fx.Supply(tagger.NewFakeTaggerParams()),
- taggerimpl.Module(),
+ taggerfx.Module(tagger.Params{
+ UseFakeTagger: true,
+ }),
statusimpl.Module(),
settingsimpl.MockModule(),
))
diff --git a/comp/process/bundle_test.go b/comp/process/bundle_test.go
index 03fb70a4fc91e..591dce9f63cfb 100644
--- a/comp/process/bundle_test.go
+++ b/comp/process/bundle_test.go
@@ -18,8 +18,8 @@ import (
"github.com/DataDog/datadog-agent/comp/core/settings/settingsimpl"
"github.com/DataDog/datadog-agent/comp/core/status"
coreStatusImpl "github.com/DataDog/datadog-agent/comp/core/status/statusimpl"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ taggerfx "github.com/DataDog/datadog-agent/comp/core/tagger/fx"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx"
"github.com/DataDog/datadog-agent/comp/dogstatsd/statsd"
@@ -48,8 +48,9 @@ func TestBundleDependencies(t *testing.T) {
coreStatusImpl.Module(),
settingsimpl.MockModule(),
statusimpl.Module(),
- fx.Supply(tagger.NewFakeTaggerParams()),
- taggerimpl.Module(),
+ taggerfx.Module(tagger.Params{
+ UseFakeTagger: true,
+ }),
fx.Supply(
status.Params{
PythonVersionGetFunc: python.GetPythonVersion,
diff --git a/comp/trace/agent/def/go.mod b/comp/trace/agent/def/go.mod
index 3bb1cd345ab18..004eab16ea078 100644
--- a/comp/trace/agent/def/go.mod
+++ b/comp/trace/agent/def/go.mod
@@ -7,36 +7,28 @@ replace github.com/DataDog/datadog-agent/pkg/proto => ../../../../pkg/proto
require (
github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0
- go.opentelemetry.io/collector/pdata v1.9.0
+ go.opentelemetry.io/collector/pdata v1.19.0
)
require (
- github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/knadh/koanf/maps v0.1.1 // indirect
- github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
- github.com/knadh/koanf/v2 v2.0.2 // indirect
- github.com/mitchellh/copystructure v1.2.0 // indirect
- github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
- github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/tinylib/msgp v1.1.8 // indirect
- go.opentelemetry.io/collector/component v0.94.1 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.94.1 // indirect
- go.opentelemetry.io/collector/confmap v0.94.1 // indirect
- go.opentelemetry.io/collector/semconv v0.94.1 // indirect
- go.opentelemetry.io/otel v1.24.0 // indirect
- go.opentelemetry.io/otel/metric v1.24.0 // indirect
- go.opentelemetry.io/otel/trace v1.24.0 // indirect
+ go.opentelemetry.io/collector/component v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.113.0 // indirect
+ go.opentelemetry.io/otel v1.31.0 // indirect
+ go.opentelemetry.io/otel/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/trace v1.31.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- go.uber.org/zap v1.26.0 // indirect
+ go.uber.org/zap v1.27.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/text v0.19.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
- google.golang.org/grpc v1.64.0 // indirect
- google.golang.org/protobuf v1.34.1 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
+ google.golang.org/grpc v1.67.1 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
)
diff --git a/comp/trace/agent/def/go.sum b/comp/trace/agent/def/go.sum
index 112fbdf4a4de1..128c803790749 100644
--- a/comp/trace/agent/def/go.sum
+++ b/comp/trace/agent/def/go.sum
@@ -1,18 +1,12 @@
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0 h1:hgbTFS6SkqbzOiWSfP58dZ/Jpjlmv6dpD4+V4LDHm2Q=
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c=
-github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
@@ -22,22 +16,12 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs=
-github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
-github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
-github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU=
-github.com/knadh/koanf/v2 v2.0.2 h1:sEZzPW2rVWSahcYILNq/syJdEyRafZIG0l9aWwL86HA=
-github.com/knadh/koanf/v2 v2.0.2/go.mod h1:HN9uZ+qFAejH1e4G41gnoffIanINWQuONLXiV7kir6k=
-github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
-github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
-github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE=
-github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
-github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -47,14 +31,6 @@ github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
-github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
-github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
-github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
@@ -68,34 +44,30 @@ github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgq
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.opentelemetry.io/collector/component v0.94.1 h1:j4peKsWb+QVBKPs2RJeIj5EoQW7yp2ZVGrd8Bu9HU9M=
-go.opentelemetry.io/collector/component v0.94.1/go.mod h1:vg+kAH81C3YS0SPzUXkSFWLPC1WH7zx70dAtUWWIHcE=
-go.opentelemetry.io/collector/config/configtelemetry v0.94.1 h1:ztYpBEBlvhcoxMiDKNmQ2SS+A41JZ4M19GfcxjCo8Zs=
-go.opentelemetry.io/collector/config/configtelemetry v0.94.1/go.mod h1:2XLhyR/GVpWeZ2K044vCmrvH/d4Ewt0aD/y46avZyMU=
-go.opentelemetry.io/collector/confmap v0.94.1 h1:O69bkeyR1YPAFz+jMd45aDZc1DtYnwb3Skgr2yALPqQ=
-go.opentelemetry.io/collector/confmap v0.94.1/go.mod h1:pCT5UtcHaHVJ5BIILv1Z2VQyjZzmT9uTdBmC9+Z0AgA=
-go.opentelemetry.io/collector/pdata v1.9.0 h1:qyXe3HEVYYxerIYu0rzgo1Tx2d1Zs6iF+TCckbHLFOw=
-go.opentelemetry.io/collector/pdata v1.9.0/go.mod h1:vk7LrfpyVpGZrRWcpjyy0DDZzL3SZiYMQxfap25551w=
-go.opentelemetry.io/collector/semconv v0.94.1 h1:+FoBlzwFgwalgbdBhJHtHPvR7W0+aJDUAdQdsmfT/Ts=
-go.opentelemetry.io/collector/semconv v0.94.1/go.mod h1:gZ0uzkXsN+J5NpiRcdp9xOhNGQDDui8Y62p15sKrlzo=
-go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
-go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
-go.opentelemetry.io/otel/exporters/prometheus v0.45.1 h1:R/bW3afad6q6VGU+MFYpnEdo0stEARMCdhWu6+JI6aI=
-go.opentelemetry.io/otel/exporters/prometheus v0.45.1/go.mod h1:wnHAfKRav5Dfp4iZhyWZ7SzQfT+rDZpEpYG7To+qJ1k=
-go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
-go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
-go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
-go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
-go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8=
-go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0=
-go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
-go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
-go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -147,13 +119,13 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
-google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
-google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
+google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/comp/trace/agent/impl/agent.go b/comp/trace/agent/impl/agent.go
index 39398d557701c..8ca8cf62d5140 100644
--- a/comp/trace/agent/impl/agent.go
+++ b/comp/trace/agent/impl/agent.go
@@ -24,8 +24,7 @@ import (
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/fx"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/dogstatsd/statsd"
traceagent "github.com/DataDog/datadog-agent/comp/trace/agent/def"
compression "github.com/DataDog/datadog-agent/comp/trace/compression/def"
@@ -63,7 +62,6 @@ type dependencies struct {
Context context.Context
Params *Params
TelemetryCollector telemetry.TelemetryCollector
- Workloadmeta workloadmeta.Component
Statsd statsd.Component
Tagger tagger.Component
Compressor compression.Component
@@ -91,7 +89,6 @@ type component struct {
params *Params
tagger tagger.Component
telemetryCollector telemetry.TelemetryCollector
- workloadmeta workloadmeta.Component
wg *sync.WaitGroup
}
@@ -111,7 +108,6 @@ func NewAgent(deps dependencies) (traceagent.Component, error) {
cancel: cancel,
config: deps.Config,
params: deps.Params,
- workloadmeta: deps.Workloadmeta,
telemetryCollector: deps.TelemetryCollector,
tagger: deps.Tagger,
wg: &sync.WaitGroup{},
diff --git a/comp/trace/bundle_test.go b/comp/trace/bundle_test.go
index 3a8d54f3d9904..a42de7e5a93ca 100644
--- a/comp/trace/bundle_test.go
+++ b/comp/trace/bundle_test.go
@@ -17,8 +17,8 @@ import (
coreconfig "github.com/DataDog/datadog-agent/comp/core/config"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
logmock "github.com/DataDog/datadog-agent/comp/core/log/mock"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ taggerfx "github.com/DataDog/datadog-agent/comp/core/tagger/fx"
"github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx"
@@ -41,9 +41,8 @@ func TestBundleDependencies(t *testing.T) {
workloadmetafx.Module(workloadmeta.NewParams()),
statsd.Module(),
fx.Provide(func(cfg config.Component) telemetry.TelemetryCollector { return telemetry.NewCollector(cfg.Object()) }),
- fx.Supply(tagger.NewFakeTaggerParams()),
zstdfx.Module(),
- taggerimpl.Module(),
+ taggerfx.Module(tagger.Params{}),
fx.Supply(&traceagentimpl.Params{}),
)
}
@@ -73,8 +72,7 @@ func TestMockBundleDependencies(t *testing.T) {
fx.Supply(&traceagentimpl.Params{}),
fx.Invoke(func(_ traceagent.Component) {}),
MockBundle(),
- taggerimpl.Module(),
- fx.Supply(tagger.NewTaggerParams()),
+ taggerfx.Module(tagger.Params{}),
))
require.NotNil(t, cfg.Object())
diff --git a/comp/trace/config/config.go b/comp/trace/config/config.go
index 2d2e75f2c4110..e92e4b510034e 100644
--- a/comp/trace/config/config.go
+++ b/comp/trace/config/config.go
@@ -16,7 +16,7 @@ import (
"gopkg.in/yaml.v2"
coreconfig "github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
apiutil "github.com/DataDog/datadog-agent/pkg/api/util"
"github.com/DataDog/datadog-agent/pkg/config/env"
"github.com/DataDog/datadog-agent/pkg/config/model"
diff --git a/comp/trace/config/config_test.go b/comp/trace/config/config_test.go
index 86c3c03833e3d..3010dc72fb3ac 100644
--- a/comp/trace/config/config_test.go
+++ b/comp/trace/config/config_test.go
@@ -31,8 +31,8 @@ import (
"gopkg.in/yaml.v2"
corecomp "github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ taggermock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
apiutil "github.com/DataDog/datadog-agent/pkg/api/util"
"github.com/DataDog/datadog-agent/pkg/config/env"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
@@ -262,7 +262,7 @@ func TestConfigHostname(t *testing.T) {
fallbackHostnameFunc = os.Hostname
}()
- taggerComponent := fxutil.Test[tagger.Mock](t, taggerimpl.MockModule())
+ taggerComponent := taggermock.SetupFakeTagger(t)
fxutil.TestStart(t, fx.Options(
corecomp.MockModule(),
@@ -2245,9 +2245,9 @@ func buildConfigComponent(t *testing.T, coreConfigOptions ...fx.Option) Componen
fx.Options(coreConfigOptions...),
)
- taggerComponent := fxutil.Test[tagger.Mock](t,
+ taggerComponent := fxutil.Test[taggermock.Mock](t,
fx.Replace(coreConfig),
- taggerimpl.MockModule(),
+ taggermock.Module(),
)
c := fxutil.Test[Component](t, fx.Options(
diff --git a/comp/trace/config/setup.go b/comp/trace/config/setup.go
index 77b7c5ccbde0b..fbfa318e30c72 100644
--- a/comp/trace/config/setup.go
+++ b/comp/trace/config/setup.go
@@ -22,7 +22,7 @@ import (
"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes"
corecompcfg "github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/otelcol/otlp"
"github.com/DataDog/datadog-agent/pkg/config/env"
diff --git a/docs/dev/agent_build.md b/docs/dev/agent_build.md
index fa05758d550f7..1896724f75403 100644
--- a/docs/dev/agent_build.md
+++ b/docs/dev/agent_build.md
@@ -52,7 +52,7 @@ Also note that the trace agent needs to be built and run separately. For more in
We use `pkg-config` to make compilers and linkers aware of Python. The required .pc files are
provided automatically when building python through omnibus.
-By default, the Agent combines multiple functionalities into a single binary to reduce
+As an option, the Agent can combine multiple functionalities into a single binary to reduce
the space used on disk. The `DD_BUNDLED_AGENT` environment variable is used to select
which functionality to enable. For instance, if set to `process-agent`, it will act as the process Agent.
If the environment variable is not defined, the process name is used as a fallback.
@@ -76,22 +76,6 @@ To disable bundling entirely:
deva agent.build --bundle agent
```
-One binary per Agent can still be built by using its own invoke task and passing the
-`--no-bundle` argument:
-- The 'main' Agent: https://github.com/DataDog/datadog-agent/blob/main/tasks/agent.py
-- The process Agent: https://github.com/DataDog/datadog-agent/blob/main/tasks/process_agent.py
-- The trace Agent: https://github.com/DataDog/datadog-agent/blob/main/tasks/trace_agent.py
-- The cluster Agent: https://github.com/DataDog/datadog-agent/blob/main/tasks/cluster_agent.py
-- The security Agent: https://github.com/DataDog/datadog-agent/blob/main/tasks/security_agent.py
-- The system probe: https://github.com/DataDog/datadog-agent/blob/main/tasks/system_probe.py
-
-So to build the process Agent as a standalone self contained executable:
-
-```
-deva process-agent.build --no-bundle
-```
-
-
## Testing Agent changes in containerized environments
Building an Agent Docker image from scratch through an embedded build is a slow process.
diff --git a/docs/dev/modules.md b/docs/dev/modules.md
index 458486743629c..13af79d47c204 100644
--- a/docs/dev/modules.md
+++ b/docs/dev/modules.md
@@ -25,18 +25,63 @@ After you have refactored, if needed, and listed the packages that you want to e
github.com/DataDog/datadog-agent/path/to/module => ./path/to/module
)
```
-1. Update the `DEFAULT_MODULES` dictionary in the `tasks/modules.py` file. You need to create a new module, specifying the path, targets, and a condition to run tests (if any).
- For example, if `pkg/A` depends on `pkg/B` and `pkg/B` is Windows only, we would specify:
- ```python
- DEFAULT_MODULES = {
- "pkg/A": GoModule("pkg/A"),
- "pkg/B": GoModule("pkg/B", condition=lambda: sys.platform == "win32")
- }
- ```
- The dependencies are computed automatically.
+1. Update the `modules.yml` file at the root of the repository. See the GoModule documentation [here](/tasks/libs/common/gomodules.py) for attributes that can be defined. The dependencies are computed automatically. Here are two example configurations:
+
+ ```yaml
+ my/module:
+ condition: is_linux
+ used_by_otel: true
+ ```
+
+ ```yaml
+ my/module:
+ independent: false
+ lint_targets:
+ - ./pkg
+ - ./cmd
+ - ./comp
+ targets:
+ - ./pkg
+ - ./cmd
+ - ./comp
+ ```
## Go nested modules tooling
Go nested modules interdependencies are automatically updated when creating a release candidate or a final version, with the same tasks that update the `release.json`. For Agent version `7.X.Y` the module will have version `v0.X.Y`.
Go nested modules are tagged automatically by the `release.tag-version` invoke task, on the same commit as the main module, with a tag of the form `path/to/module/v0.X.Y`.
+
+## The `modules.yml` file
+
+The `modules.yml` file gathers all go modules configuration.
+Each module is listed even if this module has default attributes or is ignored.
+
+Here is an example:
+
+```yaml
+modules:
+ .:
+ independent: false
+ lint_targets:
+ - ./pkg
+ - ./cmd
+ - ./comp
+ test_targets:
+ - ./pkg
+ - ./cmd
+ - ./comp
+ comp/api/api/def:
+ used_by_otel: true
+ comp/api/authtoken: default
+ test/integration/serverless/src: ignored
+ tools/retry_file_dump:
+ should_test_condition: never
+ independent: false
+ should_tag: false
+```
+
+`default` is for modules with default attribute values and `ignored` for ignored modules.
+To create a special configuration, the attributes of `GoModule` can be overridden. Attribute details are located within the `GoModule` class.
+
+This file is linted with `inv modules.validate [--fix-format]`.
diff --git a/docs/public/how-to/go/add-module.md b/docs/public/how-to/go/add-module.md
index 5b244ad3ae449..4782c1d50fadd 100644
--- a/docs/public/how-to/go/add-module.md
+++ b/docs/public/how-to/go/add-module.md
@@ -27,16 +27,19 @@ The repository contains a few submodules. To add a new one and ensure it is test
```
-4. Add `mymodule` to the `DEFAULT_MODULES` in [tasks/modules.py](https://github.com/DataDog/datadog-agent/blob/main/tasks/modules.py):
- ```python
- DEFAULT_MODULES = (
- ...,
- "path/to/mymodule": GoModule("path/to/mymodule", independent=True, should_tag=False, targets=["."]),
- )
+4. Update the `modules.yml` file at the root of the repository with this content:
+
+ ```yaml
+ path/to/mymodule:
+ independent: true
+ should_tag: false
+ test_targets:
+ - .
```
+
- `independent`: Should it be importable as an independent module?
- `should_tag`: Should the Agent pipeline tag it?
- - `targets`: Should `go test` target specific subfolders?
+ - `test_targets`: Should `go test` target specific subfolders?
5. If you use your module in another module within `datadog-agent`, add the `require` and `replace` directives in `go.mod`.
diff --git a/go.mod b/go.mod
index 6089ad4ab18f6..183ec0fa0ecc4 100644
--- a/go.mod
+++ b/go.mod
@@ -39,6 +39,7 @@ replace (
github.com/DataDog/datadog-agent/comp/core/secrets => ./comp/core/secrets
github.com/DataDog/datadog-agent/comp/core/status => ./comp/core/status
github.com/DataDog/datadog-agent/comp/core/status/statusimpl => ./comp/core/status/statusimpl
+ github.com/DataDog/datadog-agent/comp/core/tagger/tags => ./comp/core/tagger/tags
github.com/DataDog/datadog-agent/comp/core/tagger/types => ./comp/core/tagger/types
github.com/DataDog/datadog-agent/comp/core/tagger/utils => ./comp/core/tagger/utils
github.com/DataDog/datadog-agent/comp/core/telemetry => ./comp/core/telemetry/
@@ -105,6 +106,7 @@ replace (
github.com/DataDog/datadog-agent/pkg/security/seclwin => ./pkg/security/seclwin
github.com/DataDog/datadog-agent/pkg/serializer => ./pkg/serializer/
github.com/DataDog/datadog-agent/pkg/status/health => ./pkg/status/health
+ github.com/DataDog/datadog-agent/pkg/tagger/tags => ./pkg/tagger/tags
github.com/DataDog/datadog-agent/pkg/tagger/types => ./pkg/tagger/types
github.com/DataDog/datadog-agent/pkg/tagset => ./pkg/tagset/
github.com/DataDog/datadog-agent/pkg/telemetry => ./pkg/telemetry/
@@ -136,6 +138,7 @@ replace (
github.com/DataDog/datadog-agent/pkg/util/system => ./pkg/util/system
github.com/DataDog/datadog-agent/pkg/util/system/socket => ./pkg/util/system/socket/
github.com/DataDog/datadog-agent/pkg/util/testutil => ./pkg/util/testutil
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ./pkg/util/utilizationtracker
github.com/DataDog/datadog-agent/pkg/util/uuid => ./pkg/util/uuid
github.com/DataDog/datadog-agent/pkg/util/winutil => ./pkg/util/winutil/
github.com/DataDog/datadog-agent/pkg/version => ./pkg/version
@@ -152,9 +155,9 @@ require (
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.59.0
github.com/DataDog/datadog-agent/pkg/security/secl v0.56.0
github.com/DataDog/datadog-agent/pkg/trace v0.59.0
- github.com/DataDog/datadog-agent/pkg/util/cgroups v0.57.1
+ github.com/DataDog/datadog-agent/pkg/util/cgroups v0.58.1
github.com/DataDog/datadog-agent/pkg/util/log v0.59.0
- github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.1
+ github.com/DataDog/datadog-agent/pkg/util/pointer v0.58.1
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0
github.com/DataDog/datadog-go/v5 v5.5.0
github.com/DataDog/datadog-operator v0.7.1-0.20241024104907-734366f3c0d1
@@ -196,7 +199,7 @@ require (
github.com/cri-o/ocicni v0.4.3
github.com/cyphar/filepath-securejoin v0.3.4
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
- github.com/docker/docker v27.1.1+incompatible
+ github.com/docker/docker v27.3.1+incompatible
github.com/docker/go-connections v0.5.0
github.com/dustin/go-humanize v1.0.1
github.com/elastic/go-libaudit/v2 v2.5.0
@@ -223,7 +226,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/h2non/filetype v1.1.3
- github.com/hashicorp/consul/api v1.29.4
+ github.com/hashicorp/consul/api v1.30.0
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95
@@ -245,7 +248,7 @@ require (
github.com/olekukonko/tablewriter v0.0.5
github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852
github.com/open-policy-agent/opa v0.70.0
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.111.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.113.0 // indirect
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0
github.com/opencontainers/runtime-spec v1.2.0
@@ -287,17 +290,17 @@ require (
go.etcd.io/bbolt v1.3.11
go.etcd.io/etcd/client/v2 v2.306.0-alpha.0
go.mongodb.org/mongo-driver v1.15.1
- go.opentelemetry.io/collector v0.111.0 // indirect
- go.opentelemetry.io/collector/component v0.111.0
- go.opentelemetry.io/collector/confmap v1.17.0
- go.opentelemetry.io/collector/exporter v0.111.0
- go.opentelemetry.io/collector/exporter/debugexporter v0.111.0
- go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0
- go.opentelemetry.io/collector/pdata v1.17.0
- go.opentelemetry.io/collector/processor/batchprocessor v0.111.0
- go.opentelemetry.io/collector/receiver v0.111.0
- go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 // indirect
+ go.opentelemetry.io/collector v0.113.0 // indirect
+ go.opentelemetry.io/collector/component v0.113.0
+ go.opentelemetry.io/collector/confmap v1.19.0
+ go.opentelemetry.io/collector/exporter v0.113.0
+ go.opentelemetry.io/collector/exporter/debugexporter v0.113.0
+ go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0
+ go.opentelemetry.io/collector/pdata v1.19.0
+ go.opentelemetry.io/collector/processor/batchprocessor v0.113.0
+ go.opentelemetry.io/collector/receiver v0.113.0
+ go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.6.0
go.uber.org/dig v1.18.0
@@ -454,7 +457,7 @@ require (
github.com/karrick/godirwalk v1.17.0 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kjk/lzma v0.0.0-20161016003348-3fd93898850d // indirect
- github.com/klauspost/compress v1.17.10 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/knadh/koanf v1.5.0 // indirect
github.com/knqyf263/go-apk-version v0.0.0-20200609155635-041fdbb8563f // indirect
@@ -503,7 +506,7 @@ require (
github.com/pierrec/lz4/v4 v4.1.21
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
- github.com/prometheus/common v0.60.0
+ github.com/prometheus/common v0.60.1
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.7 // indirect
@@ -553,16 +556,16 @@ require (
go.etcd.io/etcd/client/v3 v3.6.0-alpha.0 // indirect
go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20220522111935-c3bc4116dcd1 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/collector/consumer v0.111.0 // indirect
- go.opentelemetry.io/collector/featuregate v1.17.0
- go.opentelemetry.io/collector/semconv v0.111.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect
- go.opentelemetry.io/contrib/propagators/b3 v1.30.0 // indirect
+ go.opentelemetry.io/collector/consumer v0.113.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.19.0
+ go.opentelemetry.io/collector/semconv v0.113.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect
+ go.opentelemetry.io/contrib/propagators/b3 v1.31.0 // indirect
go.opentelemetry.io/otel v1.31.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0
- go.opentelemetry.io/otel/exporters/prometheus v0.52.0 // indirect
+ go.opentelemetry.io/otel/exporters/prometheus v0.53.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.31.0
go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
@@ -597,20 +600,37 @@ require (
require (
github.com/DataDog/datadog-agent/comp/api/authtoken v0.0.0-00010101000000-000000000000
+ github.com/DataDog/datadog-agent/comp/core/flare/builder v0.58.1
github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.59.0
github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.59.0-rc.6
github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl v0.0.0-00010101000000-000000000000
github.com/DataDog/datadog-agent/pkg/config/structure v0.60.0-devel
github.com/DataDog/datadog-agent/pkg/util/defaultpaths v0.0.0-00010101000000-000000000000
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0
github.com/NVIDIA/go-nvml v0.12.4-0
- github.com/containerd/containerd/api v1.7.19
+ github.com/containerd/containerd/api v1.8.0
github.com/containerd/errdefs v1.0.0
github.com/distribution/reference v0.6.0
- github.com/go-viper/mapstructure/v2 v2.1.0
+ github.com/go-viper/mapstructure/v2 v2.2.1
github.com/jellydator/ttlcache/v3 v3.3.0
github.com/kouhin/envflag v0.0.0-20150818174321-0e9a86061649
github.com/lorenzosaino/go-sysctl v0.3.1
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0
+)
+
+require (
+ go.opentelemetry.io/collector/connector/connectortest v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumererror v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exportertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/internal/fanoutconsumer v0.113.0 // indirect
+ go.opentelemetry.io/collector/internal/memorylimiter v0.113.0 // indirect
+ go.opentelemetry.io/collector/internal/sharedcomponent v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor/processortest v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receivertest v0.113.0 // indirect
+ go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 // indirect
)
require (
@@ -627,19 +647,20 @@ require (
github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0-rc.6
github.com/DataDog/datadog-agent/comp/core/status v0.59.0-rc.6
github.com/DataDog/datadog-agent/comp/core/status/statusimpl v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/comp/core/tagger/tags v0.0.0-00010101000000-000000000000
github.com/DataDog/datadog-agent/comp/core/tagger/types v0.59.0
- github.com/DataDog/datadog-agent/comp/core/telemetry v0.57.1
- github.com/DataDog/datadog-agent/comp/def v0.57.1
+ github.com/DataDog/datadog-agent/comp/core/telemetry v0.58.1
+ github.com/DataDog/datadog-agent/comp/def v0.58.1
github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface v0.56.0-rc.3
- github.com/DataDog/datadog-agent/comp/logs/agent/config v0.57.1
+ github.com/DataDog/datadog-agent/comp/logs/agent/config v0.58.1
github.com/DataDog/datadog-agent/comp/netflow/payload v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/impl v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/otelcol/converter/def v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/otelcol/converter/impl v0.58.0
github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.59.0-rc.6
- github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl v0.57.1
+ github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl v0.58.1
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter v0.59.0
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.59.0-rc.6
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter v0.59.0-rc.6
@@ -655,23 +676,23 @@ require (
github.com/DataDog/datadog-agent/pkg/api v0.57.1
github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0-rc.6
github.com/DataDog/datadog-agent/pkg/config/env v0.59.0-rc.6
- github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0
+ github.com/DataDog/datadog-agent/pkg/config/mock v0.58.1
github.com/DataDog/datadog-agent/pkg/config/model v0.59.0-rc.6
github.com/DataDog/datadog-agent/pkg/config/remote v0.59.0-rc.5
github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0-rc.6
- github.com/DataDog/datadog-agent/pkg/config/utils v0.57.1
+ github.com/DataDog/datadog-agent/pkg/config/utils v0.58.1
github.com/DataDog/datadog-agent/pkg/errors v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/logs/auditor v0.57.1
- github.com/DataDog/datadog-agent/pkg/logs/client v0.57.1
- github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.57.1
- github.com/DataDog/datadog-agent/pkg/logs/message v0.57.1
- github.com/DataDog/datadog-agent/pkg/logs/metrics v0.57.1
- github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.57.1
- github.com/DataDog/datadog-agent/pkg/logs/processor v0.57.1
- github.com/DataDog/datadog-agent/pkg/logs/sds v0.57.1
- github.com/DataDog/datadog-agent/pkg/logs/sender v0.57.1
- github.com/DataDog/datadog-agent/pkg/logs/sources v0.57.1
- github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.57.1
+ github.com/DataDog/datadog-agent/pkg/logs/auditor v0.58.1
+ github.com/DataDog/datadog-agent/pkg/logs/client v0.58.1
+ github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.58.1
+ github.com/DataDog/datadog-agent/pkg/logs/message v0.58.1
+ github.com/DataDog/datadog-agent/pkg/logs/metrics v0.58.1
+ github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.58.1
+ github.com/DataDog/datadog-agent/pkg/logs/processor v0.58.1
+ github.com/DataDog/datadog-agent/pkg/logs/sds v0.58.1
+ github.com/DataDog/datadog-agent/pkg/logs/sender v0.58.1
+ github.com/DataDog/datadog-agent/pkg/logs/sources v0.58.1
+ github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.58.1
github.com/DataDog/datadog-agent/pkg/logs/util/testutils v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/metrics v0.59.0-rc.6
github.com/DataDog/datadog-agent/pkg/networkdevice/profile v0.56.0-rc.3
@@ -727,7 +748,7 @@ require (
github.com/judwhite/go-svc v1.2.1
github.com/kr/pretty v0.3.1
// todo: update datadog connector with breaking changes from https://github.com/DataDog/datadog-agent/pull/26347.
- github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.111.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.113.0
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10
github.com/prometheus-community/pro-bing v0.4.1
github.com/rickar/props v1.0.0
@@ -735,16 +756,15 @@ require (
github.com/swaggest/jsonschema-go v0.3.70
github.com/valyala/fastjson v1.6.4
github.com/vibrantbyte/go-antpath v1.1.1
- go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0
- go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0
- go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0
- go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.111.0
- go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0
- go.opentelemetry.io/collector/extension v0.111.0
- go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 // indirect
- go.opentelemetry.io/collector/otelcol v0.111.0
- go.opentelemetry.io/collector/processor v0.111.0
- go.opentelemetry.io/collector/service v0.111.0
+ go.opentelemetry.io/collector/confmap/provider/envprovider v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/fileprovider v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/httpprovider v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.19.0
+ go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.19.0
+ go.opentelemetry.io/collector/extension v0.113.0
+ go.opentelemetry.io/collector/otelcol v0.113.0
+ go.opentelemetry.io/collector/processor v0.113.0
+ go.opentelemetry.io/collector/service v0.113.0
go4.org/intern v0.0.0-20230525184215-6c62f75575cb
go4.org/mem v0.0.0-20220726221520-4f986261bf13
k8s.io/cli-runtime v0.31.2
@@ -768,19 +788,18 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Code-Hex/go-generics-cache v1.5.1 // indirect
- github.com/DataDog/datadog-agent/comp/core/flare/builder v0.57.1 // indirect
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.59.0-rc.6 // indirect
github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect
github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect
- github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.57.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.58.1 // indirect
github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/statstracker v0.57.1 // indirect
- github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.1 // indirect
- github.com/DataDog/datadog-api-client-go/v2 v2.30.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/statstracker v0.58.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/system/socket v0.58.1 // indirect
+ github.com/DataDog/datadog-api-client-go/v2 v2.31.0 // indirect
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.21.0 // indirect
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.21.0 // indirect
- github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect
github.com/Intevation/gval v1.3.0 // indirect
github.com/Intevation/jsonpath v0.2.1 // indirect
github.com/Showmax/go-fqdn v1.0.0 // indirect
@@ -788,8 +807,8 @@ require (
github.com/agext/levenshtein v1.2.3 // indirect
github.com/alecthomas/participle/v2 v2.1.1 // indirect
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
- github.com/antchfx/xmlquery v1.4.1 // indirect
- github.com/antchfx/xpath v1.3.1 // indirect
+ github.com/antchfx/xmlquery v1.4.2 // indirect
+ github.com/antchfx/xpath v1.3.2 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/apache/thrift v0.21.0 // indirect
github.com/aquasecurity/trivy-java-db v0.0.0-20240109071736-184bd7481d48 // indirect
@@ -799,7 +818,7 @@ require (
github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/bitnami/go-version v0.0.0-20231130084017-bb00604d650c // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
- github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect
+ github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
github.com/cheggaaa/pb/v3 v3.1.4 // indirect
github.com/chrusty/protoc-gen-jsonschema v0.0.0-20240212064413-73d5723042b8 // indirect
@@ -812,7 +831,7 @@ require (
github.com/dennwc/varint v1.0.0 // indirect
github.com/digitalocean/godo v1.118.0 // indirect
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect
- github.com/ebitengine/purego v0.8.0 // indirect
+ github.com/ebitengine/purego v0.8.1 // indirect
github.com/elastic/go-grok v0.3.1 // indirect
github.com/elastic/go-licenser v0.4.2 // indirect
github.com/elastic/lunes v0.1.0 // indirect
@@ -857,7 +876,7 @@ require (
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
- github.com/jaegertracing/jaeger v1.61.0 // indirect
+ github.com/jaegertracing/jaeger v1.62.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jonboulle/clockwork v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
@@ -881,54 +900,54 @@ require (
github.com/moby/sys/userns v0.1.0 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.111.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.111.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.113.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.113.0 // indirect
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect
github.com/openvex/go-vex v0.2.5 // indirect
github.com/openzipkin/zipkin-go v0.4.3 // indirect
@@ -947,9 +966,9 @@ require (
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 // indirect
- github.com/shirou/gopsutil/v4 v4.24.9 // indirect
+ github.com/shirou/gopsutil/v4 v4.24.10 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
- github.com/signalfx/sapm-proto v0.14.0 // indirect
+ github.com/signalfx/sapm-proto v0.16.0 // indirect
github.com/sigstore/rekor v1.2.2 // indirect
github.com/skeema/knownhosts v1.2.1 // indirect
github.com/smartystreets/assertions v1.1.0 // indirect
@@ -970,48 +989,46 @@ require (
github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
- go.opentelemetry.io/collector/client v1.17.0 // indirect
- go.opentelemetry.io/collector/component/componentprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/component/componentstatus v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configauth v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configcompression v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configgrpc v0.111.0 // indirect
- go.opentelemetry.io/collector/config/confighttp v0.111.0 // indirect
- go.opentelemetry.io/collector/config/confignet v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configopaque v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configretry v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configtls v1.17.0 // indirect
- go.opentelemetry.io/collector/config/internal v0.111.0 // indirect
- go.opentelemetry.io/collector/connector v0.111.0 // indirect
- go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect
- go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/exporter/nopexporter v0.111.0 // indirect
- go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/auth v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/zpagesextension v0.111.0 // indirect
- go.opentelemetry.io/collector/filter v0.111.0 // indirect
- go.opentelemetry.io/collector/internal/globalgates v0.111.0 // indirect
- go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.111.0 // indirect
- go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.111.0 // indirect
- go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0 // indirect
- go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect
+ go.opentelemetry.io/collector/client v1.19.0 // indirect
+ go.opentelemetry.io/collector/component/componentstatus v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configauth v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configcompression v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configgrpc v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/confighttp v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/confignet v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configopaque v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configretry v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configtls v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/internal v0.113.0 // indirect
+ go.opentelemetry.io/collector/connector v0.113.0 // indirect
+ go.opentelemetry.io/collector/connector/connectorprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/nopexporter v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/auth v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/extensioncapabilities v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/zpagesextension v0.113.0 // indirect
+ go.opentelemetry.io/collector/filter v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/pprofile v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/testdata v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0 // indirect
+ go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 // indirect
go.opentelemetry.io/contrib/config v0.10.0 // indirect
- go.opentelemetry.io/contrib/zpages v0.55.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 // indirect
- go.opentelemetry.io/otel/log v0.6.0 // indirect
- go.opentelemetry.io/otel/sdk/log v0.6.0 // indirect
+ go.opentelemetry.io/contrib/zpages v0.56.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect
+ go.opentelemetry.io/otel/log v0.7.0 // indirect
+ go.opentelemetry.io/otel/sdk/log v0.7.0 // indirect
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect
golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
@@ -1078,3 +1095,6 @@ replace github.com/google/gopacket v1.1.19 => github.com/DataDog/gopacket v0.0.0
// Remove once https://github.com/kubernetes/kube-state-metrics/pull/2553 is merged
replace k8s.io/kube-state-metrics/v2 v2.13.1-0.20241025121156-110f03d7331f => github.com/L3n41c/kube-state-metrics/v2 v2.13.1-0.20241108192007-8859a4289d92
+
+// Remove once https://github.com/Iceber/iouring-go/pull/31 or equivalent is merged
+replace github.com/iceber/iouring-go => github.com/paulcacheux/iouring-go v0.0.0-20241115154236-2c7785c40a0f
diff --git a/go.sum b/go.sum
index f63ad1b97cf58..6d8b56460b245 100644
--- a/go.sum
+++ b/go.sum
@@ -125,8 +125,8 @@ github.com/DataDog/cast v1.3.1-0.20190301154711-1ee8c8bd14a3 h1:SobA9WYm4K/MUtWl
github.com/DataDog/cast v1.3.1-0.20190301154711-1ee8c8bd14a3/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/DataDog/datadog-agent/comp/core/log v0.56.2 h1:qvBT+FfjKGqimyEvmsNHCZKbTfBJAdUZSVy2IZQ8HS4=
github.com/DataDog/datadog-agent/comp/core/log v0.56.2/go.mod h1:ivJ/RMZjTNkoPPNDX+v/nnBwABLCiMv1vQA5tk/HCR4=
-github.com/DataDog/datadog-api-client-go/v2 v2.30.0 h1:WHAo6RA8CqAzaUh3dERqz/n6SuG2GJ/WthBkccn0MIQ=
-github.com/DataDog/datadog-api-client-go/v2 v2.30.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc=
+github.com/DataDog/datadog-api-client-go/v2 v2.31.0 h1:JfJhYlHfLzvauI8u6h23smTooWYe6quNhhg9gpTszWY=
+github.com/DataDog/datadog-api-client-go/v2 v2.31.0/go.mod h1:d3tOEgUd2kfsr9uuHQdY+nXrWp4uikgTgVCPdKNK30U=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU=
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
@@ -189,8 +189,8 @@ github.com/Datadog/dublin-traceroute v0.0.2/go.mod h1:k2H1x9n5hEVXV7BnEhf3J7Y9A1
github.com/DisposaBoy/JsonConfigReader v0.0.0-20130112093355-33a99fdf1d5e/go.mod h1:GCzqZQHydohgVLSIqRKZeTt8IGb1Y4NaFfim3H40uUI=
github.com/DisposaBoy/JsonConfigReader v0.0.0-20201129172854-99cf318d67e7 h1:AJKJCKcb/psppPl/9CUiQQnTG+Bce0/cIweD5w5Q7aQ=
github.com/DisposaBoy/JsonConfigReader v0.0.0-20201129172854-99cf318d67e7/go.mod h1:GCzqZQHydohgVLSIqRKZeTt8IGb1Y4NaFfim3H40uUI=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/Intevation/gval v1.3.0 h1:+Ze5sft5MmGbZrHj06NVUbcxCb67l9RaPTLMNr37mjw=
@@ -264,10 +264,10 @@ github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
-github.com/antchfx/xmlquery v1.4.1 h1:YgpSwbeWvLp557YFTi8E3z6t6/hYjmFEtiEKbDfEbl0=
-github.com/antchfx/xmlquery v1.4.1/go.mod h1:lKezcT8ELGt8kW5L+ckFMTbgdR61/odpPgDv8Gvi1fI=
-github.com/antchfx/xpath v1.3.1 h1:PNbFuUqHwWl0xRjvUPjJ95Agbmdj2uzzIwmQKgu4oCk=
-github.com/antchfx/xpath v1.3.1/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
+github.com/antchfx/xmlquery v1.4.2 h1:MZKd9+wblwxfQ1zd1AdrTsqVaMjMCwow3IqkCSe00KA=
+github.com/antchfx/xmlquery v1.4.2/go.mod h1:QXhvf5ldTuGqhd1SHNvvtlhhdQLks4dD0awIVhXIDTA=
+github.com/antchfx/xpath v1.3.2 h1:LNjzlsSjinu3bQpw9hWMY9ocB80oLOWuQqFvO6xt51U=
+github.com/antchfx/xpath v1.3.2/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
@@ -410,8 +410,8 @@ github.com/blabber/go-freebsd-sysctl v0.0.0-20201130114544-503969f39d8f h1:gMH+l
github.com/blabber/go-freebsd-sysctl v0.0.0-20201130114544-503969f39d8f/go.mod h1:cTRyHktEaXkKTTEyZ0hAgS7H4V0AVoKhB8Dx0tVr/tY=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
-github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I=
-github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
+github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q=
+github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
github.com/bool64/dev v0.2.34 h1:P9n315P8LdpxusnYQ0X7MP1CZXwBK5ae5RZrd+GdSZE=
@@ -480,8 +480,8 @@ github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGD
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/containerd v1.7.23 h1:H2CClyUkmpKAGlhQp95g2WXHfLYc7whAuvZGBNYOOwQ=
github.com/containerd/containerd v1.7.23/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw=
-github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA=
-github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig=
+github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0=
+github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc=
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
@@ -521,8 +521,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
-github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
+github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
+github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -564,8 +564,8 @@ github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2
github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
-github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
+github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
@@ -582,8 +582,8 @@ github.com/dvsekhvalnov/jose2go v1.7.0 h1:bnQc8+GMnidJZA8zc6lLEAb4xNrIqHwO+9Tzqv
github.com/dvsekhvalnov/jose2go v1.7.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU=
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 h1:8EXxF+tCLqaVk8AOC29zl2mnhQjwyLxxOTuhUazWRsg=
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y0T1u5YjlyqC5GVArM7aNZRUYtTjmJ8mPJFds=
-github.com/ebitengine/purego v0.8.0 h1:JbqvnEzRvPpxhCJzJJ2y0RbiZ8nyjccVUrSM3q+GvvE=
-github.com/ebitengine/purego v0.8.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
+github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE=
+github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/elastic/go-grok v0.3.1 h1:WEhUxe2KrwycMnlvMimJXvzRa7DoByJB4PVUIE1ZD/U=
github.com/elastic/go-grok v0.3.1/go.mod h1:n38ls8ZgOboZRgKcjMY8eFeZFMmcL9n2lP0iHhIDk64=
github.com/elastic/go-libaudit/v2 v2.5.0 h1:5OK919QRnGtcjVBz3n/cs5F42im1mPlVTA9TyIn2K54=
@@ -750,8 +750,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
-github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
-github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@@ -929,10 +929,8 @@ github.com/h2non/filetype v1.0.5/go.mod h1:isekKqOuhMj+s/7r3rIeTErIRy4Rub5uBWHfv
github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ=
-github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
-github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
-github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
-github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
+github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ=
+github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
@@ -1031,8 +1029,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465 h1:KwWnWVWCNtNq/ewIX7HIKnELmEx2nDP42yskD/pi7QE=
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
-github.com/iceber/iouring-go v0.0.0-20230403020409-002cfd2e2a90 h1:xrtfZokN++5kencK33hn2Kx3Uj8tGnjMEhdt6FMvHD0=
-github.com/iceber/iouring-go v0.0.0-20230403020409-002cfd2e2a90/go.mod h1:LEzdaZarZ5aqROlLIwJ4P7h3+4o71008fSy6wpaEB+s=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
@@ -1056,8 +1052,8 @@ github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY=
github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
-github.com/jaegertracing/jaeger v1.61.0 h1:9PCP5vkilmoxmSHa9jFvtULoduFJqJ0/bHbRxUMPWTc=
-github.com/jaegertracing/jaeger v1.61.0/go.mod h1:DMy9PNQ7tOgo811jOv7UAQaM0NeSJ95lh6SW3O1s1Xk=
+github.com/jaegertracing/jaeger v1.62.0 h1:YoaJ2e8oVz5sqGGlVAKSUCED8DzJ1q7PojBmZFNKoJA=
+github.com/jaegertracing/jaeger v1.62.0/go.mod h1:jhEIHazwyb+a6xlRBi+p96BAvTYTSmGkghcwdQfV7FM=
github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
@@ -1119,8 +1115,8 @@ github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCy
github.com/kjk/lzma v0.0.0-20161016003348-3fd93898850d h1:RnWZeH8N8KXfbwMTex/KKMYMj0FJRCF6tQubUuQ02GM=
github.com/kjk/lzma v0.0.0-20161016003348-3fd93898850d/go.mod h1:phT/jsRPBAEqjAibu1BurrabCBNTYiVI+zbmyCZJY6Q=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
-github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs=
@@ -1369,120 +1365,120 @@ github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/open-policy-agent/opa v0.70.0 h1:B3cqCN2iQAyKxK6+GI+N40uqkin+wzIrM7YA60t9x1U=
github.com/open-policy-agent/opa v0.70.0/go.mod h1:Y/nm5NY0BX0BqjBriKUiV81sCl8XOjjvqQG7dXrggtI=
-github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.111.0 h1:GZgIPUBQisxljpN9hLHD4X8eNUBOXZFP+4s3Hwn5YY4=
-github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.111.0/go.mod h1:CDXM/Xtcn/sl9td4K/uXIh2FlXqOVQtKJef/524LF8o=
-github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0 h1:9rSlNU6xUEcgneB7Pm502VMH63Abc8Ibpd9y0fBit3Q=
-github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0/go.mod h1:J87FjckPF9jl1MLA36Yemp6JfsCMNk0QDUBb+7rLw7E=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.111.0 h1:54+URzI0DLo1GtZDXIGS7yhatBlpDIHhN0OnOe7sS30=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.111.0/go.mod h1:gV470n8m0uMMNxz+qEpbIl36bpFQOhAeoNTx3Z/hHu8=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.111.0 h1:p8vV11sj1cJFbd3B9tuGiA9gMGTvaSR4A57qQvVs9iY=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.111.0/go.mod h1:sBcqg1DlNC6V8e455vASnIrr8LJX7stQ8V3wlbCsIhM=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.111.0 h1:65bd8qYi83LkBrhWEfw0d46p71YBZmPHoIvx/+DJ0cI=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.111.0/go.mod h1:P7L8iofBjb57TW/7NlmAn68fs6ayzhNJRzIO2d+UwhI=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0 h1:59r33b8JeJUCQElz57S7AbwqUfQ1f1FVVjgyjiZp7Ec=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0/go.mod h1:4/axxWsIgRRptIg4olabh6ZXNL0Xt0Qjpaij8mD+dt8=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.111.0 h1:TcJ6/abaapOCRP0EfMaWbLavFF05dyFe+i99k4ABumM=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.111.0/go.mod h1:betAqGGfDFb8SLyMYBwH9BQyB9wzxWOWXXC/Ht6/kas=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.111.0 h1:w+WuYgdPpak1XzQh+RUXGaTiwBzIOlL+xfg1eE0XatM=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.111.0/go.mod h1:augVLlrOmDTXR/TcpG4ZkpnTsh629dmrCkgW5zpVySE=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.111.0 h1:Lb2NxWlKjDxGpWRvuroGTxPTk3zbTM6DsRZoN/lHJYM=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.111.0/go.mod h1:asw3mhiAUu9Vv8QRVRTnTB6at2OVP7u9ucDjX7K/yBw=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.111.0 h1:SyRe1UcR+D5KQvgbcfMfBwf/6HSwggptgTaDlRXMuXc=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.111.0/go.mod h1:Z9hwMuYMYlL6GN6zEDhBxiejJZrjjdb492J3TdlrWf4=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.111.0 h1:L6xShMvEZcWtKGguMkUcem6EDaJXVT4nN8FAkUfiPsA=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.111.0/go.mod h1:myfv37ZXoLD5aO6qp2sjwKUiEopLIbwOmCBvC+fjGj4=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.111.0 h1:tDZgAMfdxJxYOnNg0U5q2F+0ATri6IVpiE7XOtoLRI8=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.111.0/go.mod h1:huTYwd0Qjl2xjPtgHVwOW27UEY19Zdh8pJDf+JvLC+I=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0 h1:m/u2iRttl/nEjp0EZ9w371LLAqogw0tDn+wPU6D7ArY=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0/go.mod h1:mmkCl306sRZYt/7uNmjvuRRvbe/xUDSDm8fEAGSSMKI=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.111.0 h1:n1p2DedLvPEN1XEx26s1PR1PCuXTgCY4Eo+kDTq7q0s=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.111.0/go.mod h1:PdhkBzDs6Qvcv3lxNQw0ztrGaa1foNBODrF2v09zlyA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.111.0 h1:5tERPDm3N3lTHWwSAK1KsGLc8/oi6HtjvLvrP21oZMM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.111.0/go.mod h1:J1NJse8mJrVz3HeMoSRH3qAU5/y0cBRVf1Acl/lWVz8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0 h1:QhEwQTGTXitMPbmyloNfLVz1r9YzZ8izJUJivI8obzs=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0/go.mod h1:I7nEkR7TDPFw162jYtPJZVevkniQfQ0FLIFuu2RGK3A=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 h1:Hh3Lt6GIw/jMfCSJ5XjBoZRmjZ1pbJJu6Xi7WrDTUi0=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0/go.mod h1:rQ9lQhijXIJIT5UGuwiKoEcWW6bdWJ4fnO+PndfuYEw=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.111.0 h1:AviHrU5O4Dho+/4Jb4zQ4A3gYAxBhy3RwYQuZY8bXkM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.111.0/go.mod h1:0gILoAExLgqNNcSsLxIRPUlLfDP+OKbDk3cTpB3l73Y=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.111.0 h1:AFzcAfNereWXW8SP5rPtslxv8kNo3LCnnCjUzl7ZCVM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.111.0/go.mod h1:fEtKy/bUTeRKDblbFM9IyIA/QjhepmPs36TtjO1N7mo=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.111.0 h1:jKLX/Ojg8bcHtUaoS585CMbvNJdEFHZjdx233SRdf3s=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.111.0/go.mod h1:Sw1fPP1MkfGFoq1gnbLap5hdH1aoRUCVF5nrLymHa90=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.111.0 h1:yBslPtBsJq0Ftf0w+wEV1NHMZhpqFm9dh5z7IlrmVBI=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.111.0/go.mod h1:5gQLQLJ4AuMKnffhE1dBs86TAJeF52x7FVwibZ7o4YY=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.111.0 h1:kKfYR5GCVeLfkjZWMYZtnvv7NqKY9M1NaZUKVXye+2A=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.111.0/go.mod h1:tgQHCvogGlsnxQgtyhy+OwvBF4FDmK8dPlxs6nahdWs=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.111.0 h1:g9U+7hjEm1yUgaO1rJxstfLW7aEeo3S1tUyyvMlf7A8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.111.0/go.mod h1:tL9m9RF+SGLi80ai1SAy1S/o60kedifzjy0gtGQsnmY=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.111.0 h1:WUvjZUaII7vSxGqRZAKYLiBY4yIZuZHiUYNmMktcAgA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.111.0/go.mod h1:TJzFbof2CdM6/VJgOzNssq5Pe+ewGizrha4QfOK4bwA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.111.0 h1:lkLEZTKVjTVRJlyLPlZbS5JPCJQXT+eRo25WM2Jirk8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.111.0/go.mod h1:NNkYGRH4ADBR7XSrto2bP2TIZlVJsBSyNMtsjpWUfio=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.111.0 h1:6AeBTgAQGW/0q7c2UBAUTqu5+Zq/tivzrcYEJQQrOB4=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.111.0/go.mod h1:iufc35mK+M7hc5Z7BzVE3DGH8E6eJjgeyU78CKUVJDQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.111.0 h1:3QnfNnyA6OmgR0UDj6Q5zGblN5qxWMdZHVnOjVrBkD0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.111.0/go.mod h1:1AVaeAjt+WT6D+PfIVS9C8IGSiq8FFj0murp0TFbSSo=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.111.0 h1:7oG2+U/9dlbz0T3uvjt71eoY6vpLrnkswm/aLQi9zBw=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.111.0/go.mod h1:DXg1nWKUfkudDIYg3PB62EZH/DcHzOC22QB85TOE3BA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.111.0 h1:0MJmp4O7KUQOUmQYJEGNgtf30Nhx/3nLMn0jnU4Klhw=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.111.0/go.mod h1:4PYgwpscyZUUdQVLsd7dh+LXtm1QbWCvU47P3G/7tLg=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.111.0 h1:W0SthymNSB2fzGuY2KUib6EVyj/uGO3hJvaM6nW0evE=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.111.0/go.mod h1:GQHN6IbBsaGmMJIOQcqA7RXiJi55rXldP3di5YJ1IYA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.111.0 h1:Ld/1EUAQ6z3CirSyf4A8waHzUAZbMPrDOno+7tb0vKM=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.111.0/go.mod h1:wAOT1iGOOTPTw2ysr0DW2Wrfi0/TECVgiGByRQfFiV4=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0 h1:kUUO8VNv/d9Tpx0NvOsRnUsz/JvZ8SWRnK+vT0cNjuU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0/go.mod h1:SstR8PglIFBVGCZHS69bwJGl6TaCQQ5aLSEoas/8SRA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.111.0 h1:RSbk3ty1D9zeBC/elcqVdJoZjpAa331Wha99yNHnH6w=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.111.0/go.mod h1:iDBwbN0by4Y75X6j5PuRoJL5MpoaDv0l7s8dHFQHJPU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.111.0 h1:7DqvnAOXIPv6PEKA347VXACc07E1utEWcjuxsY4YOXA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.111.0/go.mod h1:6hlplIB2LiSciMabYB5IpwrBn3Hl/P8JakNm0uNkVug=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0 h1:TnAhTFTwmJzFq6vVcf57lnRzAp+rNx5tEyrMudtDGsc=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0/go.mod h1:l0CUp7vTH+Wv0tF5PYaHpPn1dLiVuMRAMqbBgXFpz54=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.111.0 h1:c8dHCbDk8RNYC8x2Go+yatwQCK8zgk3i6FmT3p0JJec=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.111.0/go.mod h1:c1ZBGwAPwSzJqWFnyAygLCbFO1GcMzpiwwqyfvYL1IU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.111.0 h1:BCev4nJfHH2u9AsWFfxR1o1Vt5HoW9myN4jaktZInRA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.111.0/go.mod h1:xJ8w6JN/tfRpUXTU6jx/bYmTIcy7OTz7PVFVR/SdqC8=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.111.0 h1:EXgr2fMBJJFnmw3GVRD2fhX3Dqq11g1IoUsrhWfcDn4=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.111.0/go.mod h1:uDdKEduyex67rYq75wyUJC1Wl0QhrqBE09WDa1SznMA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.111.0 h1:9vE440Q98eNLd/estFIDgX1jczzU978yGarFLIMieEU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.111.0/go.mod h1:HK8p16A0OoXqhehCTW3QxgWNeshuIDucGUpGwpw88Og=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.111.0 h1:h5TnZkApRY8MbauD64R2CXKY3SvkjL3+H0xzdee8Yx0=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.111.0/go.mod h1:5sDugbmzTH9mwv+/bHHeDh3GxG2OFcgsBNvAeb5HQS0=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.111.0 h1:G5aPa8QaH114z2l6mLPDsFLnZIp/gEMYnOZ3ePt6Rs8=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.111.0/go.mod h1:cqLqEaIRSmik2ayXSeHjlhQST0FumictNqM30KNwUU8=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.111.0 h1:HY9tieXpiyNQYPVzRR4uzBuAmyWOqwHUcYSKD2a0wxU=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.111.0/go.mod h1:H9N5NbDS3ZIsERRBxZaUoM+F5tM3Uphuw/757T1HM3Y=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.111.0 h1:B6o+FR0+9zPhjX97ABREAlHrqLKJCOodrgh4UoYWvHs=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.111.0/go.mod h1:FCRWxxbf+uIXnz1Q3vsOQkzsw30aA6x9ylaYXhzX8jM=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.111.0 h1:PPivenkwOU4XDjuGhU24d4dF4luu20RZeV+arB53wnA=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.111.0/go.mod h1:+FGkQ0QQbJnNDiXme+GhH1HJybOEaxpmPNBQC/j5DEo=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.111.0 h1:UUo4VOhBMIm1ZTs9xmZO4IFazLHsjUZnPkS0+q7qNL4=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.111.0/go.mod h1:5Ntpnh1KsrfOnPVUWCk/lte4Gma12bJPU8EhBS3AKSE=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.111.0 h1:esaZ1MYqKA6dNkrFk4U0xhX7E2E/Wht4WBYWjTXexbo=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.111.0/go.mod h1:MI0kHmeMSQxG5ZDz3gU3k3KZNRdULzbKdareO7KDGE0=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.111.0 h1:Ce2Ucsp+DOk6OTYsAp8ocQ0NbGPkYXYDlIp/XJeeMck=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.111.0/go.mod h1:k49ONUacPMoCrfUpBJES5MdriG90hvcDKvr9abTItRc=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.111.0 h1:0yYcJnw0vXRhRGHX0BFkN8L1L4xf5NsPVgTVOgjb8k4=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.111.0/go.mod h1:gJHCfB2sgjKPxxBVHNgpL/gI8dSgonj2k4HGeyadxe8=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0 h1:DF+kp5Gtg5B95VncWJb1oOIvf9PGpZ/gxWAHLdIrTEk=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0/go.mod h1:UzFds6+yARb/SHnC93hMeGWKJIDA131nm2dxZW+kTsc=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0 h1:KkHeODEukk2RveIEHvV5dPe06oA2PKAKbpjVZPtCRsQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0/go.mod h1:Ijvd5VMB2tstz3+3BiQy5azewQ31N4fytMFNdo8dLWE=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.111.0 h1:iQmn0AXvTu5VV/YxW5HncVm3gapV6+PA4a5NrJVA2+M=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.111.0/go.mod h1:CVYv1VaSYvQFmeRCDXvq0lfE+MjVuuxGqz8i6OYJGO8=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.111.0 h1:njXy1jWZIB/FGgH5PuD7GEFijog+dIHKkCk0/KK3ie4=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.111.0/go.mod h1:AlwQ/GcxemglIOsq5Hwhhec65zB69KCwLF3ReL9fDXQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.111.0 h1:QTTf31jE0IyIf1fjZSdSCEZXWPQh0RK6wlF6seGcGsc=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.111.0/go.mod h1:YBQziYW63U+PpizgL6FdslXC4qTsB4azIn40ZTQHkkI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.111.0 h1:aExPDvXM72YkrpUr/zzHisB2hthAglMyyMakXSoAjCI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.111.0/go.mod h1:Bbb0kvytjDWKPuvw26nY/+FDqdtUEXNpwqoefS1efrI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0 h1:abeLe2WANVWpnNua41Aa+RTmYYGs0gk1oQRd2/XH7Uo=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0/go.mod h1:Nij85WmJr/+q0HeAvGulEYxFE+PMlhFelPWN6yzCuuw=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.111.0 h1:XIgynRPC/r2x+pc+f2VbtAdBsueejnhA9zBE/bmXL/c=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.111.0/go.mod h1:xVSuQG3cKqWa/jp7wLviJ00CEK0qU0HCp+a6u8G7m9c=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.111.0 h1:ZL2MHUllnnScOOGUw47XxzXTPv2f9FD4iQMmpQ+Y97E=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.111.0/go.mod h1:ethl7HjfN9VGHPWDrfiLAYHPrfUAYxk66tZT5841Uq8=
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.113.0 h1:WJfe78FxmmshTWilSpwtDRHoOl8gxKAnTW0eT4kureY=
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.113.0/go.mod h1:XkFc7X0M2hnM4AYg6yX+r7btu208RG8THfM/npF/eKQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.113.0 h1:f3PXc+bgwGtJIlWwtOXDYYNBKOyabhkLl6Q3j/Rc1ow=
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.113.0/go.mod h1:Ix1L1Godr9TS2DvHWSz/+0RBPa/D34azSvk/xU3dlGs=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.113.0 h1:ITFlE4UHWUQQg5Vy7XfaRaE7hADsK3UTtEJ5xrPbWU8=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.113.0/go.mod h1:tH0inPPuK/JYMDlLTe7ioGN1Zbp3NbNSp8H0Vc5C+uU=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.113.0 h1:sYEmpMeBGMSoy8j1GSEkBO5GM578Qtq3QtmVcsYn51s=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.113.0/go.mod h1:ULlXfiJdXw0ZzUGXLToQvGb1gQuIGC2BjtfyTnw3QtE=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.113.0 h1:QTCu/YoA37p2Kf81Bc/h5TM70K8O+E/gMBc9vCkJrUQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.113.0/go.mod h1:f3Lwdfnyzf7IY/gFXiRnSPMWGLZv17r+GJuTwnZkQL8=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0 h1:Aej9sL1v25Xf8AfM1fyRluBLV5g5+40GnagCb0/UJfY=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0/go.mod h1:QiXedkQif06vbjtVgnmmrHOunLUoLLAf10uaA/qKgts=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.113.0 h1:DLVAun5qoedOzWZ1+yoZRuGj0RonhrGAqdAOO7k6k+A=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.113.0/go.mod h1:betAqGGfDFb8SLyMYBwH9BQyB9wzxWOWXXC/Ht6/kas=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.113.0 h1:Z6Y34pWmm/be0D5QCirBLEoMf7K9ObRPkMMD8bt4Ce0=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.113.0/go.mod h1:LZ3Wbuyz2MNNAj3bT9u7QUt21glx2FWE26b0EjWKWnc=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.113.0 h1:hJfn9iDpcmaeYCBJvwAhmH4prq2Rdr+hWizEIKWaXmg=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.113.0/go.mod h1:tImtwJ0mSfqoPycKMDGFZcVBGi+8KnBTmBSMHsGSTkU=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.113.0 h1:Z4gWWk5N3ZBJlAx0hRm2sDUxlW8qK7dVoRbWMUnKiOM=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.113.0/go.mod h1:FB+Xi0xKwEqTCshu2SGykG2LXRvg+5ZYR3jymz6+Mtw=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.113.0 h1:+kM285dDDP69EfG6lB+A8nO98wtYrXT/afxlIzk5+IU=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.113.0/go.mod h1:X6/2QgHXQ73vvs1C5LEMyifUknLa71E27hUcbTY5vRo=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.113.0 h1:9xbLur+qeUwlKDrx9LGI9fvypussD2E00q6QFkkGpGo=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.113.0/go.mod h1:l58b3KahydKLOzt7S0s0NYBYH9Nm8tZ4w/GIVvOLCWU=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0 h1:hc407moydGsK9FfAxjP3Tw+akhmKO8PfaH18II3N7Q4=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0/go.mod h1:+1IJOoUqBzghufMZDSMhKzs1UOi39h8pMFDxWm/k1k4=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.113.0 h1:ERdOiTmsDruI/s5oEgN45NsZW2roWXmO0u2aceR4GuM=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.113.0/go.mod h1:RkClsQhl8hdAg874Ot4kaG92s+6dW0Dvlt5HRxhsavc=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.113.0 h1:qudJNiKFfxly/lPyfdZNwnT6OKCzRFw0BI0E5CI6WwU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.113.0/go.mod h1:eHVWQ484ohG4ZjaV8KTej3CMVEPh0w6zBXfi+qqvyGw=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.113.0 h1:7A8MgFPYRQWq1RkFBktq01CW+eTYhiGML0IxQNv2uaM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.113.0/go.mod h1:E1pc7mDXH+5s7RyXw291h8lz2dhzPzaDrAHqP1Lawvw=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.113.0 h1:EZ/ZNsovNcQq+wwAbTAWNY+6BHnv24NxvVoC6eYmtg8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.113.0/go.mod h1:u21dEQ9yQ0JyLMSrKLWWzHG/lHSlteNfa/EQ7Vqcle4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.113.0 h1:462BO6mxAJKJdlqxs3swj9RtebQNeHXp2g7IK/N7+Zc=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.113.0/go.mod h1:aFfi8Vz+pIYXYxrx9rDP2Rhduac7mrjUYEAI/0GUIl4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.113.0 h1:SjzsWZqrxUoRWvXzwrbjQCnoJRJApJVCCqjrtflapMM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.113.0/go.mod h1:sYIh0S63ztcL2q9gEKhvviDQ5caH1sFE1oeFRDQOQ6A=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.113.0 h1:E/D5TwJyKN19p1FQ0XD5r5G1uH9NH/HVAM0e1hFMwFU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.113.0/go.mod h1:FcClDm9XVV5tzUDzmH2Mhe6TfYiZ/3GSAQITnuCjZgg=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.113.0 h1:ZQ7HYLrsbbfrIYybOVDG4d1IS4PfxuZEll7lvLmcYDs=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.113.0/go.mod h1:2/6/eY8Uvg+NfYDsAbND96A4u5q4UjcDlBJolYcj6jE=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.113.0 h1:BidrOROxYyacsUzNJyPZdvuX9VpbmFnSJXAt0yz6cXU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.113.0/go.mod h1:TM5DUkjqHozcpjCX36f7cDv6Rv+J8ysZ52zCYAEQZCk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.113.0 h1:V9CRl77lPG2xFPpnRf1QLiePo7FZngt+vw6M2KLdRMU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.113.0/go.mod h1:zL+Msnlb1TEObHQ2RNnPKbVr3GhSdyI2ZqGtiSxg2/E=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.113.0 h1:SVvBEMYFwb+vq/bNg9TVHNCaiFYb79B8Ce2z0/sWBgc=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.113.0/go.mod h1:lRfw7YDJE82nmdTO14Sk5rzkFJPHyH1iAnWIkjyQgQk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.113.0 h1:gMaV3vZTFJU/B/g/2kKjbHn+LcIIsN4MhGSHi6/ZaFk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.113.0/go.mod h1:iES2YMgH43z6KdlWnTWiZwWY3cyAL/GJOzCEbD7nGkI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.113.0 h1:wKyHS2Vly/qhEEKYsKtRqprZko9hZd+jtmn3TAMrZZU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.113.0/go.mod h1:lFOHzTWx4ozV2x/vRWBgu7gC0rkkX6EMdQkyIxLL2zI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.113.0 h1:sfPQ3RPyimzEzB2aQtUaEu7ElwDmlze+q0moWV9YpkI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.113.0/go.mod h1:QMenHMTJ5qrpghfPoMMpS0QwW6pQrXOqHn7QcNgn+NU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.113.0 h1:T3KHOKgNbWKD6gx5R2D4p3tg+0p4lIrxyf+4iy0Yxts=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.113.0/go.mod h1:VqC1uSDXtgrEuqyyJuYp7G8Sr2FY2QtP3pN9a7cTueA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.113.0 h1:5YU2trp7n56EyDS9dEyY1UxyaW6wxB4KiyKoyjDYooo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.113.0/go.mod h1:EBtBK1lE/HMUz51cafBLlJAXZ/2ZDRCV4C+rT04fMYM=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.113.0 h1:e2WOkaj5AWPaKTU4l+GEXGrEUbrAhQPQ7zLUdnXLGX8=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.113.0/go.mod h1:x+DR/o7xddbdhpQP2QKBJkPUdrj2tl/uR1OJ/sqlrWc=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.113.0 h1:GERkhEtH3Uk8CMBzFoBmMD7fBfcrtIM9hopbQqzdvNs=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.113.0/go.mod h1:+VbefhdCgKiTXsIU6sQj9L96Ow53a8EMcUW6EMt3zTA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.113.0 h1:Ux4k3jMTufk4HZ4RNYrqLxIt6wKEeOFAndzGWBjiUqs=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.113.0/go.mod h1:GWsSVmzpKZucOefuqqvKEUbnqGRV9OCSX2vzTjC/sbI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.113.0 h1:n44G0Quw+OQMZ+ELOo/Aw1qcwVu7LXae8GBVjVSE+HQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.113.0/go.mod h1:6dxGDpWsWnzC5UK3RhgfdXRC+3c8RLCO+cC+RiS+jIU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.113.0 h1:Qy0MqQQKmW9wrfduM794WKg4qjTobIdj5HDHW5FZ/P8=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.113.0/go.mod h1:X25Nhlw6xhuNSd/C0FeEwmD4PGmcXDl7pa2jR0UREkU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.113.0 h1:G8w+wg4nnqBqe297fBWnjJ5Tg2OYDVEMsdWA9/3ozxQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.113.0/go.mod h1:m3hDVsXPQzQfeji3+hn7NYJPHDRlHhQRNd5T7N5wZqc=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.113.0 h1:mFYOvag34kGXceVj29k0ZpBUyjEX7VZq+KctUSfNiG0=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.113.0/go.mod h1:54P38b2i1CgHvZLxD3EAzVccqreamGEz2U4pqy9DuHw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.113.0 h1:vKtNSM3VQBTJx1ecf+I1iqn4kj7fKif1SpBLQ+numf8=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.113.0/go.mod h1:Iw3ndTvDCbmN6uugOfIqmfb1pgCWTYg+tXXS2rr3QJo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.113.0 h1:XzI7y1bC29bnJ9VgyA0JCws0e/rIyt7yteT5gGLe6nw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.113.0/go.mod h1:OxdhzDFd7/cRck6LeWLF2TUC/QhwoJSUlw35NuVbvzA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.113.0 h1:4fkpWb4wG1RWr9C5M8IbxcBwq9qGlqf5bbp3bxRXOzA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.113.0/go.mod h1:yAl+wNAt18sGPizvODIHue12echxjpFL2SEQvUksN5M=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.113.0 h1:z8DT+vL/NfRN2hpacLIqtCMcInFrM01CY9LtoFJq+jQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.113.0/go.mod h1:U0wBuG6Jz+DBzcPNCmRVZaZTXqaKC+RYo4eJiSKJwwk=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.113.0 h1:VHt8tWPRPzPjl2AzO6tAd86yboX1UDDFkBm6oDVNAoI=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.113.0/go.mod h1:r5DetWqG2vclbHNAYp4a+Kg5i7ZAfcRFez5bliTLDr0=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.113.0 h1:+eYxV9vp6u8fKM+9acEJYGUa3SD1vJF776c/haougNQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.113.0/go.mod h1:xSVeb2A5wmIuJ9Vak9UwPCP/yN1SDd+pBKfYHROW6YE=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.113.0 h1:WN/zA6sCT4VzCA9CpRTGj6wiu17vIFozm/0yxNwKeGs=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.113.0/go.mod h1:sqWPNepjCX0+Ye++N9HwJjJ7KUAOkn4/ML/2GzrZquQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.113.0 h1:bloEe7hK+H95zQ5WusQwQCILjudl6ljyR4kO95+Ocuo=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.113.0/go.mod h1:/vYbT5YZ/SyKGtbBtKCI00sGUk3Xd90A2hT5iSWP8Dk=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.113.0 h1:zDScLkNf/llyiH1cjpVv5PhJAT5AcHIXCB35zW5jsbM=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.113.0/go.mod h1:S+GR7FZJYtFBnbjgD737QImuvm8d4+PBccpI0Xrda4E=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.113.0 h1:5cEQNFwYAn8PJ66l88lGLMSz9TYWiIUFayDwAtpJumw=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.113.0/go.mod h1:uSskqW6AAEHg/2yZ6hNo9V0OfQmM/wHP9lSNr2OSUU4=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.113.0 h1:Syf4U5GrSl2fOGXBAChHrdSvMRBhi7BFiDwKbFkNo/8=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.113.0/go.mod h1:Q9shex5tQOoK4FeVx0NvYkwu18hCPFlRnwqqQzLfbpo=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.113.0 h1:l6UiNM2jEs+kBmsNt8qg2dEZpUVc8CLsvYksa9CZRDs=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.113.0/go.mod h1:Ky2VVQfIqv9ifden+amJv3sTi3Y/9u6rNMtq8cnVECs=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.113.0 h1:vgYhhliqQ6WUy5b1BE0ILJQKTweaLDPI5l/bUIunqLo=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.113.0/go.mod h1:UG/8zqyqbdN0HHkiWC7GZW4wFL4GIyRtsshc1RY8bGo=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.113.0 h1:c4vPI/PrRPnueoaKJKzBztsASIn5yZ7BT7uc4PHR39E=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.113.0/go.mod h1:MR9VrhTtPazVAEWR/RPQln1i0Cmrc/6e2+zRpI/gwhM=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.113.0 h1:F4rPll42bwVC2tuoyG0f0LmjfoBMc5eNT0j7iDtbDXk=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.113.0/go.mod h1:GReM8iGTmB0zIH9A2vT3Ki5xP690A9RVycxB65dao38=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.113.0 h1:9b7iQMFbA1rG9DVkepxN9qilmEYG5VaVb+meTsVEKBU=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.113.0/go.mod h1:urzOE2U+Ax4Zho6VYFGOC/x1B4npKNDB6GLJ/F9k56I=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.113.0 h1:oNEV5G5m59ekwho7BaiBdUbqWMAsneE6IFkVkiZY4Yg=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.113.0/go.mod h1:tULSPMh5LZ9UJZa5QgAd7okycfM0x28AoWhtRt7DNvw=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.113.0 h1:qPUFbh7d9Ddiyky8F4by+KRUUksqMiO+gFDXGkaxevw=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.113.0/go.mod h1:e+cVHDHttCojcC8iyBcDFtfK3JWZlqaDc+WCTl5sEdo=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0 h1:frNZmJqTjdGO4vkxM2LN5URbddpD+R8taOCtDz3JJiQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0/go.mod h1:qFJOAhv+agSMdJSmsIW4QDsTWIdWo8NRZvY3OV2iWV8=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.113.0 h1:tIJu6tCPiZKK6FiBfn2ritlwMSrjwS4iNTI0u02J/ns=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.113.0/go.mod h1:cI/ZaTpiY6QDTihTwSKXgtsWXwSPr4Bpb95CjA1LO5Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.113.0 h1:Azx7wP6Me2iXr6h2bTqbRjtxB6HnXN9QpYECLu/eQ8I=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.113.0/go.mod h1:KezOwO7COPCsZnE8ECCrWvAywUhTZMYtJx7H36JguoQ=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -1520,6 +1516,8 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
+github.com/paulcacheux/iouring-go v0.0.0-20241115154236-2c7785c40a0f h1:y9Dz6t0aNsYtm0UrwTbu85LlsKN8GjnEhABMSrxEmvU=
+github.com/paulcacheux/iouring-go v0.0.0-20241115154236-2c7785c40a0f/go.mod h1:LEzdaZarZ5aqROlLIwJ4P7h3+4o71008fSy6wpaEB+s=
github.com/pborman/uuid v0.0.0-20180122190007-c65b2f87fee3/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
@@ -1585,8 +1583,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
-github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -1669,8 +1667,8 @@ github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh
github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
-github.com/shirou/gopsutil/v4 v4.24.9 h1:KIV+/HaHD5ka5f570RZq+2SaeFsb/pq+fp2DGNWYoOI=
-github.com/shirou/gopsutil/v4 v4.24.9/go.mod h1:3fkaHNeYsUFCGZ8+9vZVWtbyM1k2eRnlL+bWO8Bxa/Q=
+github.com/shirou/gopsutil/v4 v4.24.10 h1:7VOzPtfw/5YDU+jLEoBwXwxJbQetULywoSV4RYY7HkM=
+github.com/shirou/gopsutil/v4 v4.24.10/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
@@ -1681,8 +1679,8 @@ github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFR
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/signalfx/sapm-proto v0.14.0 h1:KWh3I5E4EkelB19aP1/54Ik8khSioC/RVRW/riOfRGg=
-github.com/signalfx/sapm-proto v0.14.0/go.mod h1:Km6PskZh966cqNoUn3AmRyGRix5VfwnxVBvn2vjRC9U=
+github.com/signalfx/sapm-proto v0.16.0 h1:E8W+awZBl3nmpDTdbPK8Uwla9FdSCWpZChR3p+7bzw0=
+github.com/signalfx/sapm-proto v0.16.0/go.mod h1:7VTAIoYIgkAK+j6w3l4Aici+EYySGAmXCK0rfD2OZkU=
github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
github.com/sijms/go-ora/v2 v2.8.19 h1:7LoKZatDYGi18mkpQTR/gQvG9yOdtc7hPAex96Bqisc=
@@ -1958,160 +1956,178 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector v0.111.0 h1:D3LJTYrrK2ac94E2PXPSbVkArqxbklbCLsE4MAJQdRo=
-go.opentelemetry.io/collector v0.111.0/go.mod h1:eZi4Z1DmHy+sVqbUI8dZNvhrH7HZIlX+0AKorOtv6nE=
-go.opentelemetry.io/collector/client v1.17.0 h1:eJB4r4nPY0WrQ6IQEEbOPCOfQU7N15yzZud9y5fKfms=
-go.opentelemetry.io/collector/client v1.17.0/go.mod h1:egG3tOG68zvC04hgl6cW2H/oWCUCCdDWtL4WpbcSUys=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/component/componentprofiles v0.111.0 h1:yT3Sa833G9GMiXkAOuYi30afd/5vTmDQpZo6+X/XjXM=
-go.opentelemetry.io/collector/component/componentprofiles v0.111.0/go.mod h1:v9cm6ndumcbCSqZDBs0vRReRW7KSYax1RZVhs/CiZCo=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY=
-go.opentelemetry.io/collector/config/configauth v0.111.0 h1:0CcgX4TzK5iu2YtryIu3al8lNI+9fqjbGoyvAFk9ZCw=
-go.opentelemetry.io/collector/config/configauth v0.111.0/go.mod h1:5oyYNL3gnYMYNdNsEjFvA2Tdc1yjG8L+HQFIjPo6kK8=
-go.opentelemetry.io/collector/config/configcompression v1.17.0 h1:5CzLHTPOgHaKod1ZQLYs0o7GZDBhdsLQRm8Lcbo79vU=
-go.opentelemetry.io/collector/config/configcompression v1.17.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
-go.opentelemetry.io/collector/config/configgrpc v0.111.0 h1:XwHBWCP0m/d6YZ0VZltzVvnz5hDB9ik7sPRjJIdmjUk=
-go.opentelemetry.io/collector/config/configgrpc v0.111.0/go.mod h1:K9OLwZM8dGNL1Jul/FGxlRsnLd1umgDyA+yxq2BNXUs=
-go.opentelemetry.io/collector/config/confighttp v0.111.0 h1:nZJFHKYYeCasyhhFC71iZf6GAs6pfFcNOga6b8+lFvc=
-go.opentelemetry.io/collector/config/confighttp v0.111.0/go.mod h1:heE5JjcLDiH8fMULf55QL2oI9+8Ct58Vq/QfP7TV684=
-go.opentelemetry.io/collector/config/confignet v1.17.0 h1:cBmDdiPuIVrHiecgCKyXhRYmDOz9Do5IM7O1JhbB3es=
-go.opentelemetry.io/collector/config/confignet v1.17.0/go.mod h1:o3v4joAEjvLwntqexg5ixMqRrU1+Vst+jWuCUaBNgOg=
-go.opentelemetry.io/collector/config/configopaque v1.17.0 h1:wHhUgJhmDgNd6M7GW8IU5HjWi/pNmBEe9jBhavoR45g=
-go.opentelemetry.io/collector/config/configopaque v1.17.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
-go.opentelemetry.io/collector/config/configretry v1.17.0 h1:9GaiNKgUDx5by+A0aHKojw1BilHSK+8wq2LOmnynN00=
-go.opentelemetry.io/collector/config/configretry v1.17.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
-go.opentelemetry.io/collector/config/configtls v1.17.0 h1:5DPgmBgpKEopLGmkjaihZHVA/8yH0LGoOrUZlb86T0Q=
-go.opentelemetry.io/collector/config/configtls v1.17.0/go.mod h1:xUV5/xAHJbwrCuT2rGurBGSUqyFFAVVBcQ5DJAENeCc=
-go.opentelemetry.io/collector/config/internal v0.111.0 h1:HTrN9xCpX42xlyDskWbhA/2NkSjMasxNEuGkmjjq7Q8=
-go.opentelemetry.io/collector/config/internal v0.111.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
-go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM=
-go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k=
-go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0 h1:KH0ABOBfSPp5XZtHkoXeI9wKoOD9B0eN6TDo08SwN/c=
-go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0/go.mod h1:jyFbV9hLrYJf2zNjqcpzkzB6zmPj/Ohr+S+vmPuxyMY=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0 h1:UyMO2ddtO7GKuFjrkR51IxmeBuRJrb1KKatu60oosxI=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0/go.mod h1:SCJ8zvuuaOwQJk+zI87XSuc+HbquP2tsYb9aPlfeeRg=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0 h1:R/U0uWAyppNrxvF+piqhnhcrPSNz3wnwHyEIRCbrmh0=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0/go.mod h1:3mtUk7wwDQyPUsHtCOLi2v0uSZWfC00BhOhqHs4CWs4=
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.111.0 h1:uNMlftoTRqjavhoGY2LvUc4z0+lDht1UHrvj856skRU=
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.111.0/go.mod h1:1Vhweh5dDeTUOmcw5WSGHPgHUwZzouf3y2dQr4yFWjA=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0 h1:FtKwwHS8NSNJWrhE7JsFlYhe+2GojENfOQbhQMSTyRo=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0/go.mod h1:9/R8ucfVQEEEHMv9b7M6rSB8nF2k+MfIO93vbDEsaMU=
-go.opentelemetry.io/collector/connector v0.111.0 h1:dOaJRO27LyX4ZnkZA51namo2V5idRWvWoMVf4b7obro=
-go.opentelemetry.io/collector/connector v0.111.0/go.mod h1:gPwxA1SK+uraSTpX20MG/cNc+axhkBm8+B6z6hh6hYg=
-go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 h1:tJ4+hcWRhknw+cRw6d6dI4CyX3/puqnd1Rg9+mWdwHU=
-go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0/go.mod h1:LdfE8hNYcEb+fI5kZp4w3ZGlTLFAmvHAPtTZxS6TZ38=
-go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
-go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
-go.opentelemetry.io/collector/exporter v0.111.0 h1:NpiP6xXGOmSi59RlB5gGTB+PtCLldVeK3vCQBJPW0sU=
-go.opentelemetry.io/collector/exporter v0.111.0/go.mod h1:FjO80zGWZjqXil8vM1MS8gyxxzZ29WmChTNV2y9xjHo=
-go.opentelemetry.io/collector/exporter/debugexporter v0.111.0 h1:KiypGuW+JG1gV9l6pvSEIMKwn+MLJn0Ol62HMe5ytr4=
-go.opentelemetry.io/collector/exporter/debugexporter v0.111.0/go.mod h1:7ihw3KDcvrY5kXIRNxB64Pz6kguf5Q0x9mJAvbBLT5Y=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 h1:fpIRPzqsaEtbVip/wsU6h/GMGISo7UjiiYV61MOMEpQ=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0/go.mod h1:NGUTQd1fminFnw289fVQFN4dxdyedK4GTTrJUc9gCtw=
-go.opentelemetry.io/collector/exporter/nopexporter v0.111.0 h1:CRgqzloeVAZDnjJ+ayfqOcQZ6uREf6O65NCHV4LqGcY=
-go.opentelemetry.io/collector/exporter/nopexporter v0.111.0/go.mod h1:Mw/hi2MVqUt3QEmxjOWcomICZi7Jx/31tCmr5l0T2+o=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0 h1:eOyd1InTuymfIP4oMzJki28JjpGQzOEK6Y0YlI6pwgA=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0/go.mod h1:nOUveQ4KWFqlCA6b0L5DXMosZCcNtit8abEuLHwBaUM=
-go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0 h1:e7UHbu515LfkFKHdXdOvz0gQP6jXD+uuoKs1PRXHEw0=
-go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0/go.mod h1:0+TSVnAEw9hyF34b0eu36IFVLpAgpxOugAI2ZgNPX18=
-go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8=
-go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI=
-go.opentelemetry.io/collector/extension/auth v0.111.0 h1:V9DfnMsKdVfsQMeGR5H/nAYHlZnr1Td75kkJOKbCevk=
-go.opentelemetry.io/collector/extension/auth v0.111.0/go.mod h1:4O5JQqEdAWuq4giicIy6DKlgkKTC0qgVEJm44RhviZY=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 h1:kUJSFjm6IQ6nmcJlfSFPvcEO/XeOP9gJY0Qz9O98DKg=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0/go.mod h1:qQGvl8Kz2W8b7QywtE8GNqWJMDBo47cjoiIXYuE+/zM=
-go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 h1:Ps2/2TUbAkxgZu1YxSxDweZDLJx5x7CyNKCINZkLFtY=
-go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0/go.mod h1:q4kBSWsOX62hAp7si+Y0Y0ZXWyCpXjiRuWWz7IL/MDI=
-go.opentelemetry.io/collector/extension/zpagesextension v0.111.0 h1:X+YXkJ3kX8c3xN/Mfiqc/gKB7NaQnG4Cge9R60lKOyw=
-go.opentelemetry.io/collector/extension/zpagesextension v0.111.0/go.mod h1:v5u5Ots6HgbhKsvRXB+SF9cmVTgkUATNiejHbpsa0rY=
-go.opentelemetry.io/collector/featuregate v1.17.0 h1:vpfXyWe7DFqCsDArsR9rAKKtVpt72PKjzjeqPegViws=
-go.opentelemetry.io/collector/featuregate v1.17.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
-go.opentelemetry.io/collector/filter v0.111.0 h1:OUE1wKch/C5AfF/TUpMWbKTSYYvSUlNPHADD0c8Also=
-go.opentelemetry.io/collector/filter v0.111.0/go.mod h1:74Acew42eexKiuLu3tVehyMK4b5XJPWXoJyNjK2FM+U=
-go.opentelemetry.io/collector/internal/globalgates v0.111.0 h1:pPf/U401i/bEJ8ucbYMyqOdkujyZ92Gbm6RFkJrDvBc=
-go.opentelemetry.io/collector/internal/globalgates v0.111.0/go.mod h1:HqIBKc8J5Vccn93gkN1uaVK42VbVsuVyjmo5b1MORZo=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8=
-go.opentelemetry.io/collector/otelcol v0.111.0 h1:RcS1/BDsEBGdI4YjosdElxYwsA2tTtiYEuWjEF0p8vk=
-go.opentelemetry.io/collector/otelcol v0.111.0/go.mod h1:B/ri/CwsW7zeLXkCcB3XtarxjJ80eIC+z8guGhFFpis=
-go.opentelemetry.io/collector/otelcol/otelcoltest v0.111.0 h1:kiXvbIR1K8Tcv10ffaA9MvcPoGpm6uitaXzfhDZnV3o=
-go.opentelemetry.io/collector/otelcol/otelcoltest v0.111.0/go.mod h1:7jwDuhMkglGVSyJT6CQ1vE7A6fjYTvbap7/QVl3P8kQ=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
-go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
-go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
-go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g=
-go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo=
-go.opentelemetry.io/collector/processor/batchprocessor v0.111.0 h1:JoBjX0LjmQ3n22o54sxAN9T6sgxumBLDqq0RElvYAVc=
-go.opentelemetry.io/collector/processor/batchprocessor v0.111.0/go.mod h1:8Dw89aInFh4dX3A0iyIcpbQ1A/8hVWtxjrJKyAOb9TQ=
-go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.111.0 h1:Y5gMjXn6bbMiOgiGSaWvOFb4jbCVraG1/GjQsJjCEMI=
-go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.111.0/go.mod h1:s42Gm7LMqietFs0Cpl+ma2sEYZP3RWHIlXlWimGW2cQ=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ=
-go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6tq/2qvbMIKlhG3s=
-go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g=
-go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0 h1:JWg6F//9AH34KVL1RkRVpcyJpbzIWMtpCLxggeo3gsY=
-go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0/go.mod h1:FpiGrlkIhMh9gNzaw29m5zhSkRRruZnwB2RyGI0yCsw=
-go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0 h1:VsQ55DvHvjYop+wbpY6qCSF0cfoMNMZEd0pANa5l+9Y=
-go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0/go.mod h1:/zUX2GHa7CIeqGRl+hpQk3zQ1QCaUpBK42XGqrXAbzQ=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM=
-go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
-go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
-go.opentelemetry.io/collector/service v0.111.0 h1:6yGjjbZvlYbir+vzi/9ACF965m8i96ScPTjpVvki3ms=
-go.opentelemetry.io/collector/service v0.111.0/go.mod h1:tti8TAosPuRj51/bbrSvf6OIJoSyTkywEvTdY/fAuwY=
+go.opentelemetry.io/collector v0.113.0 h1:dBuo2/OKBhoMCR86W4fFJLXGQ0gJfKRmi65AZwFkU2I=
+go.opentelemetry.io/collector v0.113.0/go.mod h1:XbjD4Yw9LunLo3IJu3ZZytNZ0drEVznxw1Z14Ujlw3s=
+go.opentelemetry.io/collector/client v1.19.0 h1:TUal8WV1agTrZStgE7BJ8ZC0IHLGtrfgO9ogU9t1mv8=
+go.opentelemetry.io/collector/client v1.19.0/go.mod h1:jgiXMEM6l8L2QEyf2I/M47Zd8+G7e4z+6H8q5SkHOlQ=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0 h1:iNlUi+vDo7OqQwg4UOjfqqY3Xram4SkdcSZAyxt6QfE=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0/go.mod h1:NF/hVnP0kcsEqV9x14yMxN9GoehuOaVgMpO+s2KE+28=
+go.opentelemetry.io/collector/config/configauth v0.113.0 h1:CBz43fGpN41MwLdwe3mw/XVSIDvGRMT8aaaPuqKukTU=
+go.opentelemetry.io/collector/config/configauth v0.113.0/go.mod h1:Q8SlxrIvL3FJO51hXa4n9ARvox04lK8mmpjf4b3UNAU=
+go.opentelemetry.io/collector/config/configcompression v1.19.0 h1:bTSjTLhnPXX1NSFM6GzguEM/NBe8QUPsXHc9kMOAJzE=
+go.opentelemetry.io/collector/config/configcompression v1.19.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
+go.opentelemetry.io/collector/config/configgrpc v0.113.0 h1:rNbRd033JlIeU+TH+3bEt4OwRlEwrktWdf6V+VUJUPk=
+go.opentelemetry.io/collector/config/configgrpc v0.113.0/go.mod h1:InXxPUj1oxJ57Sl954d2tQxXTgVHhfppFYjMwGjQukg=
+go.opentelemetry.io/collector/config/confighttp v0.113.0 h1:a6iO0y1ZM5CPDvwbryzU+GpqAtAQ3eSfNseoAUogw7c=
+go.opentelemetry.io/collector/config/confighttp v0.113.0/go.mod h1:JZ9EwoiWMIrXt5v+d/q54TeUhPdAoLDimSEqTtddW6E=
+go.opentelemetry.io/collector/config/confignet v1.19.0 h1:gEDTd8zLx4pPpG5///XPRpbYUpvKsuQzDdM5IEULY9w=
+go.opentelemetry.io/collector/config/confignet v1.19.0/go.mod h1:o3v4joAEjvLwntqexg5ixMqRrU1+Vst+jWuCUaBNgOg=
+go.opentelemetry.io/collector/config/configopaque v1.19.0 h1:7uvntQeAAtqCaeiS2dDGrT1wLPhWvDlEsD3SliA/koQ=
+go.opentelemetry.io/collector/config/configopaque v1.19.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
+go.opentelemetry.io/collector/config/configretry v1.19.0 h1:DEg8PXpo4ahMYgMzZZUU2cPcDF4vqowZlvimJ/t9InY=
+go.opentelemetry.io/collector/config/configretry v1.19.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/config/configtls v1.19.0 h1:GQ/cF1hgNqHVBq2oSSrOFXxVCyMDyd5kq4R/RMEbL98=
+go.opentelemetry.io/collector/config/configtls v1.19.0/go.mod h1:1hyqnYB3JqEUlk1ME/s9HYz4oCRcxQCRxsJitFFT/cA=
+go.opentelemetry.io/collector/config/internal v0.113.0 h1:9RAzH8v7ItFT1npHpvP0SvUzBHcZDliCGRo9Spp6v7c=
+go.opentelemetry.io/collector/config/internal v0.113.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
+go.opentelemetry.io/collector/confmap v1.19.0 h1:TQ0lZpAKqgsE0EKk+u4JA+uBbPYeFRmWP3GH43w40CY=
+go.opentelemetry.io/collector/confmap v1.19.0/go.mod h1:GgNu1ElPGmLn9govqIfjaopvdspw4PJ9KeDtWC4E2Q4=
+go.opentelemetry.io/collector/confmap/provider/envprovider v1.19.0 h1:f8O/I5pVRN86Gx5mHekNx92S6fGdOS4VcooRJKWe6Bs=
+go.opentelemetry.io/collector/confmap/provider/envprovider v1.19.0/go.mod h1:AiaW5YW1LD0/WlZuc8eZuZPBH6PA9QqsiAYRX1iC6T0=
+go.opentelemetry.io/collector/confmap/provider/fileprovider v1.19.0 h1:TYwyk4ea3U+5MYcEjrzZAaonBcLlabQu8CZeB7ekAYY=
+go.opentelemetry.io/collector/confmap/provider/fileprovider v1.19.0/go.mod h1:i3mL4OSGI5JM0hnzHujhJK+LDlvO3XrJxBsuclfU/jY=
+go.opentelemetry.io/collector/confmap/provider/httpprovider v1.19.0 h1:a077jcs3DVtaVdmgmCk3x4rRYuTkIqMDsoUc+VICHZk=
+go.opentelemetry.io/collector/confmap/provider/httpprovider v1.19.0/go.mod h1:HjYkzhHbwUacv27nq0JLsslGpbtrXyyfU30Oc72AWLU=
+go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.19.0 h1:8LoQxjlduFQUEwYuHWnxEj0A+GcAtpv2qPpDJVz7A5E=
+go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.19.0/go.mod h1:Y8ErEl5m9+1AWzWcMn52PATH5dw50wuyyPMffK62RCI=
+go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.19.0 h1:oV66DKiEdAt8EMZqGSChK2iEOxjrVaWRhf4OqqmqjbM=
+go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.19.0/go.mod h1:jtNUdO6i1k38BG7vFst+d1jk/N+c419uVR8HB4J0VjI=
+go.opentelemetry.io/collector/connector v0.113.0 h1:ii+s1CjsLxtglqRlFs6tv8UU/uX45dyN9lbTRbR0p8g=
+go.opentelemetry.io/collector/connector v0.113.0/go.mod h1:KmA8eQouTVxVk65Bf6utuMjWovMuOvNVRcYV60CAGtc=
+go.opentelemetry.io/collector/connector/connectorprofiles v0.113.0 h1:yAEKTxVGpBtHrrXeZFrBcVOQkduxCncH0o4hqKrDRyw=
+go.opentelemetry.io/collector/connector/connectorprofiles v0.113.0/go.mod h1:+mwzwIZ1cEK29ByfC38uF8hmFO8Wf9ShT1c756XX+RI=
+go.opentelemetry.io/collector/connector/connectortest v0.113.0 h1:WHekoL0izkrKLVQLv79v0QhqfnXkVcw0sgdF07EqWLM=
+go.opentelemetry.io/collector/connector/connectortest v0.113.0/go.mod h1:KouywNfkxRf+yzbI2pdolzTLkLoCV4ASEI2o2pDt+Cg=
+go.opentelemetry.io/collector/consumer v0.113.0 h1:KJSiK5vSIY9dgPxwKfQ3gOgKtQsqc+7IB7mGhUAL5c8=
+go.opentelemetry.io/collector/consumer v0.113.0/go.mod h1:zHMlXYFaJlZoLCBR6UwWoyXZ/adcO1u2ydqUal3VmYU=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0 h1:Hd2N7n9RKbnKRaVrdw6fPBoQko5zZIgCxwVxkL6SAIE=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0/go.mod h1:o0MAGFdzcr7LFTUQ6iivPPhbVmn2ZVIYm3FPXk2+JUo=
+go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.113.0 h1:2kLIt+6dGmhCd48CWXh3IEon/uW4+c8y81IGCA/h8wE=
+go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.113.0/go.mod h1:/eESy7Ifyf7G6r6WUpEOq2tnfjIJ2QNB2EvZcEu0aWA=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 h1:RftAcQUY5UOfbEK4s16jnORqTx16y9+PxA1lQwt98cQ=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0/go.mod h1:ZuHrQ4pWguh6dw0DgTfcUtdY/T+cnOJJNP6LMbm5Y5A=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0 h1:ua2AjNx3DUA8qElXNkggB4w3VDL/rBKBvryOQkhumH8=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0/go.mod h1:vK8o4ZTZSiG3rVyqxZcCNmT/cvEfx34ig7V65L9+6Rg=
+go.opentelemetry.io/collector/exporter v0.113.0 h1:lDZJ6xfuhyLsT/7lqLhIN/ftA6G+9fuYFtubPFvNDxo=
+go.opentelemetry.io/collector/exporter v0.113.0/go.mod h1:0W4NBf5NjWYxR8oJodmOybgN4O0MLazdJwwHevirvXg=
+go.opentelemetry.io/collector/exporter/debugexporter v0.113.0 h1:iShn3SuSpx78YLgl7fQCJsHLs7z0RDtbN58/Amoy5xc=
+go.opentelemetry.io/collector/exporter/debugexporter v0.113.0/go.mod h1:O1dLnYA81a+ZecBD89vjZSBgkAnhnfXwsmYsE7LP2/s=
+go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.113.0 h1:Auz2vZYReIlyDvJ162OCO8XcV7L2BIbFb5HJWxerc5A=
+go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.113.0/go.mod h1:JQuawcAfDuzNneDF5Ep1CZJ5snsLp6Bh1gZcHhja7yU=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 h1:8bsk3wYYNr+WAM5nZkFjiLYSTH9MsY2tm7nUpMWt3qc=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0/go.mod h1:/HFWF846XePYL/qKDtcEAFgkiGSkLUTaC59A5F48axM=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0 h1:U6cRxjJS7td8iNriUI2QfEdH+Yj60ytyvpmnmKTw0+8=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0/go.mod h1:SRz5jGyAjtNiWwJ93B1+Ndk1p3oFtQsyLw52UGeyRwc=
+go.opentelemetry.io/collector/exporter/nopexporter v0.113.0 h1:DClFr8PNUc+f8fciNK3Sdj+ydCgZRc2zVk+1WCLyPfU=
+go.opentelemetry.io/collector/exporter/nopexporter v0.113.0/go.mod h1:RGn9QUUOldcD19yKyg5e6dBjy/o//RaWGOhkS6azhqo=
+go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0 h1://7diunG5SohqaYfqvHzCtcfrY7y3WQj0vklFYgeNW4=
+go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0/go.mod h1:THF0eq4lA6dYOho53iKFCBOv91HEeISZyep5dXr+fBU=
+go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0 h1:22Srn4V6ogOdi4Bn6eKtKqAidWyjPkYKYDR3Xq91nFY=
+go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0/go.mod h1:BRA54WRyPS9RYDIUEGxxJvxJ/uZ66++bCFPHliDstCQ=
+go.opentelemetry.io/collector/extension v0.113.0 h1:Vp/YSL8ZCkJQrP1lf2Bm5yaTvcp6ROO3AnfuSL3GEXM=
+go.opentelemetry.io/collector/extension v0.113.0/go.mod h1:Pwp0TNqdHeER4V1I6H6oCvrto/riiOAqs3737BWCnjw=
+go.opentelemetry.io/collector/extension/auth v0.113.0 h1:4ggRy1vepOabUiCWfU+6M9P/ftXojMUNAvBpeLihYj8=
+go.opentelemetry.io/collector/extension/auth v0.113.0/go.mod h1:VbvAm2YZAqePkWgwn0m0vBaq3aC49CxPVwHmrJ24aeQ=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 h1:Qq4IaB6bMUrf/bWoPZ5ESWywCt+vDi8I/ChYejIEPcc=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0/go.mod h1:BRmo+A7f06u/rhyLauU/Vogk+QRN0y1j2VVVgMGWrfQ=
+go.opentelemetry.io/collector/extension/extensioncapabilities v0.113.0 h1:GuJzpnrJtsMrKWGmb1VL4EqL6x1HDtZmtvy3yEjth6Y=
+go.opentelemetry.io/collector/extension/extensioncapabilities v0.113.0/go.mod h1:oa72qndu7nAfEeEpDyDi9qLcaSJGIscLc/eeojFADx0=
+go.opentelemetry.io/collector/extension/zpagesextension v0.113.0 h1:b/Clxso9uVwLVYjvRQ1NGBWHpUEZ/++uA5sJbBj0ryo=
+go.opentelemetry.io/collector/extension/zpagesextension v0.113.0/go.mod h1:5csGYy9Ydfy6Hpw3Tod864P6HUEZpA6UiuPJPG3TjSU=
+go.opentelemetry.io/collector/featuregate v1.19.0 h1:ASea2sU+tdpKI3RxIJC/pufDAfwAmrvcQ4EmTHVu0B0=
+go.opentelemetry.io/collector/featuregate v1.19.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
+go.opentelemetry.io/collector/filter v0.113.0 h1:5ODwM8QEOzZq08H8DJilBa4PHieXpBreJVKZ0D2YshA=
+go.opentelemetry.io/collector/filter v0.113.0/go.mod h1:Mh3N6cpVijdamUJj1tAgSU1RG/Ek4FuY2ODKYxKZDtk=
+go.opentelemetry.io/collector/internal/fanoutconsumer v0.113.0 h1:Beu2zAN6/EDXQ6hMFU6FT1BsnU5FXmWNOlfTAhrgbGc=
+go.opentelemetry.io/collector/internal/fanoutconsumer v0.113.0/go.mod h1:WUXbc4L6KJ3SpmsxBgId0OYzRDuS7n274kNpqrgnSmY=
+go.opentelemetry.io/collector/internal/memorylimiter v0.113.0 h1:qe3xZYB4BgSuPDgFMQbcJ5gDy8t+S1vt6pL+OKrdx9E=
+go.opentelemetry.io/collector/internal/memorylimiter v0.113.0/go.mod h1:Eo/XZsFPS1mo0DHnAaVeyPNFn3HKVXB2nog++b3CnRc=
+go.opentelemetry.io/collector/internal/sharedcomponent v0.113.0 h1:a4gT+t+rboCaH70anhu+ZQp9IJ7UjVeZxZJvxTBgCqU=
+go.opentelemetry.io/collector/internal/sharedcomponent v0.113.0/go.mod h1:6WDDyjI4pbtfUmtv/JKLs7OwieEEvaDVb3Zcc4oA9Vg=
+go.opentelemetry.io/collector/otelcol v0.113.0 h1:t32gA8Pg9lsqYQml4mgvHdqFSsjvPZMvGywi0zXQYjk=
+go.opentelemetry.io/collector/otelcol v0.113.0/go.mod h1:PV6pDLPEaulRs3ceWYNEDuG5100F35I5VzeC2ekT/vY=
+go.opentelemetry.io/collector/otelcol/otelcoltest v0.113.0 h1:bfu9oQQbO6KEcpgh7muc1ixsGQs+qFWwi9LyabGILqw=
+go.opentelemetry.io/collector/otelcol/otelcoltest v0.113.0/go.mod h1:0bag/J2REbnIKKKHvYe0RqyjmsUv4OJH14kNef+lD4Q=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0 h1:VRf4p0VhfuaR+Epy/nMIlu/9t39WU9CUgHVUvpuGxfU=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0/go.mod h1:5aDejksdXh5PdJN/OhpzATGT3kbNL0RMmw2Q0Q6E/o0=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0 h1:vRfn85jicO2F4eOTgsWtzmU/K3E/uZUtM1HEefvvJD8=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0/go.mod h1:sR+6eR+YEJhYZu9StbqzeWcCmHpfBAgX/qjP82HY9Gw=
+go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uFxcm2VO+vhad/eE=
+go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg=
+go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0 h1:PwQnErsLvEd1x6VIyjLmKQot9huKWqIfEz1kd+8aj4k=
+go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0/go.mod h1:tChJYsCG3wc6JPT9aJO3y+32V14NhmCFZOh3k5ORGdQ=
+go.opentelemetry.io/collector/processor v0.113.0 h1:BQI6MsKtiCG9HT/nmiRXTKP6SZFrjFKVfM6pTQfbc0k=
+go.opentelemetry.io/collector/processor v0.113.0/go.mod h1:oX91zMI8ZkoaYSUfUYflHiMiiBJPKtODNBUCrETLLd8=
+go.opentelemetry.io/collector/processor/batchprocessor v0.113.0 h1:LPNbVILg+cKTFIi8ziIa2idZ5MRlBIf4Wr72gZNgXb0=
+go.opentelemetry.io/collector/processor/batchprocessor v0.113.0/go.mod h1:tCg+B/1idJS5inxod+nRPXFdVi89Bsnl6RvzIOO9k5I=
+go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0 h1:3/5z0Pe/yduwF0DSpytW2+mwDA5JaIL/w6vfNYy5KzQ=
+go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0/go.mod h1:h3wIlqMtJGIDKttjMJBo6J4dHU/Mi6+bKSxvRVUpsXs=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE=
+go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA=
+go.opentelemetry.io/collector/processor/processortest v0.113.0/go.mod h1:tNg04r5KlpVx774jSC8U/oYst1eb7WIiz+0AjXKJ0Uw=
+go.opentelemetry.io/collector/receiver v0.113.0 h1:vraAbkPy8Pz9x5X39gV+j9t6x23PNsY2aJ6gQMugRbQ=
+go.opentelemetry.io/collector/receiver v0.113.0/go.mod h1:IUa8/lNw8Qh4L5Q3jOeRWKW0ebQPoNcfhytxN5Puq2A=
+go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0 h1:Kgan6/DCH1YZzOztXPPair+V2czPmrJxxrIIxLVYmn4=
+go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0/go.mod h1:1nVoRLC/antEw4gvcyaRBT3aBt7nh3KBASWLLhmm0Ts=
+go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0 h1:yhnj8kmh1IQ4g6fIWvhum/wYPUU2WmRpQuy1iSvf4e4=
+go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0/go.mod h1:3OB+oJlOb1rlLLdBwxae4g2Qh5C97Eg17HVveIddUCw=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 h1:uVxuzjGe2t1sbwahSBowVHYnGzpzn8brmfn8z1UHvQg=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0/go.mod h1:khKDkzYJR2x2OPUqGSmoSncdINT9lUE5IThiHPDbqZk=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0 h1:0vOvz3S4Q/KwcNCS9C7zPo0uxD6RSWktG88yGdxfV6g=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0/go.mod h1:sRq5ctm5UE/0Ar562wnCVQ1zbAie/D127D1WbtbEuEc=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/collector/service v0.113.0 h1:SFT+kWuj9TehC34MLEWfXH6QocGl3MYqLJ7UbxZtWzM=
+go.opentelemetry.io/collector/service v0.113.0/go.mod h1:6+JY80Yd4J4RWpvRmpCUUZFOZKGVs9a1QKCKPlDrKfs=
+go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 h1:j8icMXyyqNf6HGuwlYhniPnVsbJIq7n+WirDu3VAJdQ=
+go.opentelemetry.io/contrib/bridges/otelzap v0.6.0/go.mod h1:evIOZpl+kAlU5IsaYX2Siw+IbpacAZvXemVsgt70uvw=
go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c=
go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 h1:hCq2hNMwsegUvPzI7sPOvtO9cqyy5GbWt/Ybp2xrx8Q=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0/go.mod h1:LqaApwGx/oUmzsbqxkzuBvyoPpkxk3JQWnqfVrJ3wCA=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
-go.opentelemetry.io/contrib/propagators/b3 v1.30.0 h1:vumy4r1KMyaoQRltX7cJ37p3nluzALX9nugCjNNefuY=
-go.opentelemetry.io/contrib/propagators/b3 v1.30.0/go.mod h1:fRbvRsaeVZ82LIl3u0rIvusIel2UUf+JcaaIpy5taho=
-go.opentelemetry.io/contrib/zpages v0.55.0 h1:F+xj261Ulwl79QC+2O+IO1b3NbwppUDwN+7LbDSdQcY=
-go.opentelemetry.io/contrib/zpages v0.55.0/go.mod h1:dDqDGDfbXSjt/k9orZk4Huulvz1letX1YWTKts5GQpo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
+go.opentelemetry.io/contrib/propagators/b3 v1.31.0 h1:PQPXYscmwbCp76QDvO4hMngF2j8Bx/OTV86laEl8uqo=
+go.opentelemetry.io/contrib/propagators/b3 v1.31.0/go.mod h1:jbqfV8wDdqSDrAYxVpXQnpM0XFMq2FtDesblJ7blOwQ=
+go.opentelemetry.io/contrib/zpages v0.56.0 h1:W7vP6s3juzL5KiHpr41zLNmsJ0QAZudYu8ay0zGAoko=
+go.opentelemetry.io/contrib/zpages v0.56.0/go.mod h1:IxPRP4TYHw9jLeaEOSDIiA9zmyJNZNO6sbW55iMvSXs=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 h1:QSKmLBzbFULSyHzOdO9JsN9lpE4zkrz1byYGmJecdVE=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0/go.mod h1:sTQ/NH8Yrirf0sJ5rWqVu+oT82i4zL9FaF6rWcqnptM=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 h1:WypxHH02KX2poqqbaadmkMYalGyy/vil4HE4PM4nRJc=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0/go.mod h1:U79SV99vtvGSEBeeHnpgGJfTsnsdkWLpPN/CcHAzBSI=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 h1:VrMAbeJz4gnVDg2zEzjHG4dEH86j4jO6VYB+NgtGD8s=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0/go.mod h1:qqN/uFdpeitTvm+JDqqnjm517pmQRYxTORbETHq5tOc=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 h1:mMOmtYie9Fx6TSVzw4W+NTpvoaS1JWWga37oI1a/4qQ=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0/go.mod h1:yy7nDsMMBUkD+jeekJ36ur5f3jJIrmCwUrY67VFhNpA=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 h1:FZ6ei8GFW7kyPYdxJaV2rgI6M+4tvZzhYsQ2wgyVC08=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0/go.mod h1:MdEu/mC6j3D+tTEfvI15b5Ci2Fn7NneJ71YMoiS3tpI=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 h1:ZsXq73BERAiNuuFXYqP4MR5hBrjXfMGSO+Cx7qoOZiM=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0/go.mod h1:hg1zaDMpyZJuUzjFxFsRYBoccE86tM9Uf4IqNMUxvrY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
-go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ=
-go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 h1:bZHOb8k/CwwSt0DgvgaoOhBXWNdWqFWaIsGTtg1H3KE=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0/go.mod h1:XlV163j81kDdIt5b5BXCjdqVfqJFy/LJrHA697SorvQ=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 h1:IyFlqNsi8VT/nwYlLJfdM0y1gavxGpEvnf6FtVfZ6X4=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0/go.mod h1:bxiX8eUeKoAEQmbq/ecUT8UqZwCjZW52yJrXJUSozsk=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 h1:kn1BudCgwtE7PxLqcZkErpD8GKqLZ6BSzeW9QihQJeM=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0/go.mod h1:ljkUDtAMdleoi9tIG1R6dJUpVwDcYjw3J2Q6Q/SuiC0=
-go.opentelemetry.io/otel/log v0.6.0 h1:nH66tr+dmEgW5y+F9LanGJUBYPrRgP4g2EkmPE3LeK8=
-go.opentelemetry.io/otel/log v0.6.0/go.mod h1:KdySypjQHhP069JX0z/t26VHwa8vSwzgaKmXtIB3fJM=
+go.opentelemetry.io/otel/exporters/prometheus v0.53.0 h1:QXobPHrwiGLM4ufrY3EOmDPJpo2P90UuFau4CDPJA/I=
+go.opentelemetry.io/otel/exporters/prometheus v0.53.0/go.mod h1:WOAXGr3D00CfzmFxtTV1eR0GpoHuPEu+HJT8UWW2SIU=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 h1:TwmL3O3fRR80m8EshBrd8YydEZMcUCsZXzOUlnFohwM=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0/go.mod h1:tH98dDv5KPmPThswbXA0fr0Lwfs+OhK8HgaCo7PjRrk=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0 h1:HZgBIps9wH0RDrwjrmNa3DVbNRW60HEhdzqZFyAp3fI=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.31.0/go.mod h1:RDRhvt6TDG0eIXmonAx5bd9IcwpqCkziwkOClzWKwAQ=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64=
+go.opentelemetry.io/otel/log v0.7.0 h1:d1abJc0b1QQZADKvfe9JqqrfmPYQCz2tUSO+0XZmuV4=
+go.opentelemetry.io/otel/log v0.7.0/go.mod h1:2jf2z7uVfnzDNknKTO9G+ahcOAyWcp1fJmk/wJjULRo=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
-go.opentelemetry.io/otel/sdk/log v0.6.0 h1:4J8BwXY4EeDE9Mowg+CyhWVBhTSLXVXodiXxS/+PGqI=
-go.opentelemetry.io/otel/sdk/log v0.6.0/go.mod h1:L1DN8RMAduKkrwRAFDEX3E3TLOq46+XMGSbUfHU/+vE=
+go.opentelemetry.io/otel/sdk/log v0.7.0 h1:dXkeI2S0MLc5g0/AwxTZv6EUEjctiH8aG14Am56NTmQ=
+go.opentelemetry.io/otel/sdk/log v0.7.0/go.mod h1:oIRXpW+WD6M8BuGj5rtS0aRu/86cbDV/dAfNaZBIjYM=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
diff --git a/internal/remote-agent/main.go b/internal/remote-agent/main.go
new file mode 100644
index 0000000000000..be86d0f6811ad
--- /dev/null
+++ b/internal/remote-agent/main.go
@@ -0,0 +1,227 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+// Package main contains the logic for the remote-agent example client
+package main
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "net"
+ "os"
+ "time"
+
+ grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "github.com/DataDog/datadog-agent/pkg/api/security"
+ pbcore "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core"
+ grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc"
+)
+
+type remoteAgentServer struct {
+ started time.Time
+}
+
+func (s *remoteAgentServer) GetStatusDetails(_ context.Context, req *pbcore.GetStatusDetailsRequest) (*pbcore.GetStatusDetailsResponse, error) {
+ log.Printf("Got request for status details: %v", req)
+
+ fields := make(map[string]string)
+ fields["Started"] = s.started.Format(time.RFC3339)
+
+ return &pbcore.GetStatusDetailsResponse{
+ MainSection: &pbcore.StatusSection{
+ Fields: fields,
+ },
+ NamedSections: make(map[string]*pbcore.StatusSection),
+ }, nil
+
+}
+
+func (s *remoteAgentServer) GetFlareFiles(_ context.Context, req *pbcore.GetFlareFilesRequest) (*pbcore.GetFlareFilesResponse, error) {
+ log.Printf("Got request for flare files: %v", req)
+
+ files := make(map[string][]byte, 0)
+ files["example.txt"] = []byte("Hello, world!\n")
+
+ return &pbcore.GetFlareFilesResponse{
+ Files: files,
+ }, nil
+}
+
+func newRemoteAgentServer() *remoteAgentServer {
+ return &remoteAgentServer{
+ started: time.Now(),
+ }
+}
+
+func main() {
+ // Read in all of the necessary configuration for this remote agent.
+ var agentID string
+ var displayName string
+ var listenAddr string
+ var agentIpcAddress string
+ var agentAuthTokenFilePath string
+
+ flag.StringVar(&agentID, "agent-id", "", "Agent ID to register with")
+ flag.StringVar(&displayName, "display-name", "", "Display name to register with")
+ flag.StringVar(&listenAddr, "listen-addr", "", "Address to listen on")
+ flag.StringVar(&agentIpcAddress, "agent-ipc-address", "", "Agent IPC server address")
+ flag.StringVar(&agentAuthTokenFilePath, "agent-auth-token-file", "", "Path to Agent authentication token file")
+
+ flag.Parse()
+
+ if flag.NFlag() != 5 {
+ flag.Usage()
+ os.Exit(1)
+ }
+
+ // Build and spawn our gRPC server.
+ selfAuthToken, err := buildAndSpawnGrpcServer(listenAddr, newRemoteAgentServer())
+ if err != nil {
+ log.Fatalf("failed to build/spawn gRPC server: %v", err)
+ }
+
+ log.Printf("Spawned remote agent gRPC server on %s.", listenAddr)
+
+ // Now we'll register with the Core Agent, pointing it to our gRPC server.
+ rawAgentAuthToken, err := os.ReadFile(agentAuthTokenFilePath)
+ if err != nil {
+ log.Fatalf("failed to read agent auth token file: %v", err)
+ }
+
+ agentAuthToken := string(rawAgentAuthToken)
+ agentClient, err := newAgentSecureClient(agentIpcAddress, agentAuthToken)
+ if err != nil {
+ log.Fatalf("failed to create agent client: %v", err)
+ }
+
+ registerReq := &pbcore.RegisterRemoteAgentRequest{
+ Id: agentID,
+ DisplayName: displayName,
+ ApiEndpoint: listenAddr,
+ AuthToken: selfAuthToken,
+ }
+
+ log.Printf("Registering with Core Agent at %s...", agentIpcAddress)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ resp, err := agentClient.RegisterRemoteAgent(ctx, registerReq)
+ if err != nil {
+ log.Fatalf("failed to register remote agent: %v", err)
+ }
+
+ log.Printf("Registered with Core Agent. Recommended refresh interval of %d seconds.", resp.RecommendedRefreshIntervalSecs)
+
+ // Wait forever, periodically refreshing our registration.
+ refreshTicker := time.NewTicker(time.Duration(resp.RecommendedRefreshIntervalSecs) * time.Second)
+ for range refreshTicker.C {
+ _, err := agentClient.RegisterRemoteAgent(context.Background(), registerReq)
+ if err != nil {
+ log.Fatalf("failed to refresh remote agent registration: %v", err)
+ }
+
+ log.Println("Refreshed registration with Core Agent.")
+ }
+}
+
+func buildAndSpawnGrpcServer(listenAddr string, server pbcore.RemoteAgentServer) (string, error) {
+ // Generate a self-signed certificate for our server.
+ host, _, err := net.SplitHostPort(listenAddr)
+ if err != nil {
+ return "", fmt.Errorf("unable to extract hostname from listen address: %v", err)
+ }
+
+ tlsKeyPair, err := buildSelfSignedTLSCertificate(host)
+ if err != nil {
+ return "", fmt.Errorf("unable to generate TLS certificate: %v", err)
+ }
+
+ // Make sure we can listen on the intended address.
+ listener, err := net.Listen("tcp", listenAddr)
+ if err != nil {
+ log.Fatalf("failed to listen: %v", err)
+ }
+
+ // Generate an authentication token and set up our gRPC server to both serve over TLS and authenticate each RPC
+ // using the authentication token.
+ authToken, err := generateAuthenticationToken()
+ if err != nil {
+ return "", fmt.Errorf("unable to generate authentication token: %v", err)
+ }
+
+ serverOpts := []grpc.ServerOption{
+ grpc.Creds(credentials.NewServerTLSFromCert(tlsKeyPair)),
+ grpc.UnaryInterceptor(grpc_auth.UnaryServerInterceptor(grpcutil.StaticAuthInterceptor(authToken))),
+ }
+
+ grpcServer := grpc.NewServer(serverOpts...)
+ pbcore.RegisterRemoteAgentServer(grpcServer, server)
+
+ go func() {
+ if err := grpcServer.Serve(listener); err != nil {
+ log.Fatalf("failed to serve: %v", err)
+ }
+ }()
+
+ return authToken, nil
+}
+
+func buildSelfSignedTLSCertificate(host string) (*tls.Certificate, error) {
+ hosts := []string{host}
+ _, certPEM, key, err := security.GenerateRootCert(hosts, 2048)
+ if err != nil {
+ return nil, errors.New("unable to generate certificate")
+ }
+
+ // PEM encode the private key
+ keyPEM := pem.EncodeToMemory(&pem.Block{
+ Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key),
+ })
+
+ pair, err := tls.X509KeyPair(certPEM, keyPEM)
+ if err != nil {
+ return nil, fmt.Errorf("unable to generate TLS key pair: %v", err)
+ }
+
+ return &pair, nil
+}
+
+func generateAuthenticationToken() (string, error) {
+ rawToken := make([]byte, 32)
+ _, err := rand.Read(rawToken)
+ if err != nil {
+ return "", fmt.Errorf("can't create authentication token value: %s", err)
+ }
+
+ return hex.EncodeToString(rawToken), nil
+}
+
+func newAgentSecureClient(ipcAddress string, agentAuthToken string) (pbcore.AgentSecureClient, error) {
+ tlsCreds := credentials.NewTLS(&tls.Config{
+ InsecureSkipVerify: true,
+ })
+
+ conn, err := grpc.NewClient(ipcAddress,
+ grpc.WithTransportCredentials(tlsCreds),
+ grpc.WithPerRPCCredentials(grpcutil.NewBearerTokenAuth(agentAuthToken)),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return pbcore.NewAgentSecureClient(conn), nil
+}
diff --git a/modules.yml b/modules.yml
new file mode 100644
index 0000000000000..76f89c8ba8211
--- /dev/null
+++ b/modules.yml
@@ -0,0 +1,280 @@
+# This file contains the go modules configuration.
+# See tasks/libs/common/gomodules.py for more information.
+
+modules:
+ .:
+ independent: false
+ lint_targets:
+ - ./pkg
+ - ./cmd
+ - ./comp
+ test_targets:
+ - ./pkg
+ - ./cmd
+ - ./comp
+ comp/api/api/def:
+ used_by_otel: true
+ comp/api/authtoken: default
+ comp/core/config:
+ used_by_otel: true
+ comp/core/flare/builder:
+ used_by_otel: true
+ comp/core/flare/types:
+ used_by_otel: true
+ comp/core/hostname/hostnameinterface:
+ used_by_otel: true
+ comp/core/log/def:
+ used_by_otel: true
+ comp/core/log/impl:
+ used_by_otel: true
+ comp/core/log/impl-trace: default
+ comp/core/log/mock:
+ used_by_otel: true
+ comp/core/secrets:
+ used_by_otel: true
+ comp/core/status:
+ used_by_otel: true
+ comp/core/status/statusimpl: default
+ comp/core/tagger/tags:
+ used_by_otel: true
+ comp/core/tagger/types:
+ used_by_otel: true
+ comp/core/tagger/utils:
+ used_by_otel: true
+ comp/core/telemetry:
+ used_by_otel: true
+ comp/def:
+ used_by_otel: true
+ comp/forwarder/defaultforwarder:
+ used_by_otel: true
+ comp/forwarder/orchestrator/orchestratorinterface:
+ used_by_otel: true
+ comp/logs/agent/config:
+ used_by_otel: true
+ comp/netflow/payload: default
+ comp/otelcol/collector-contrib/def:
+ used_by_otel: true
+ comp/otelcol/collector-contrib/impl:
+ used_by_otel: true
+ comp/otelcol/converter/def:
+ used_by_otel: true
+ comp/otelcol/converter/impl:
+ used_by_otel: true
+ comp/otelcol/ddflareextension/def:
+ used_by_otel: true
+ comp/otelcol/ddflareextension/impl:
+ used_by_otel: true
+ comp/otelcol/logsagentpipeline:
+ used_by_otel: true
+ comp/otelcol/logsagentpipeline/logsagentpipelineimpl:
+ used_by_otel: true
+ comp/otelcol/otlp/components/exporter/datadogexporter:
+ used_by_otel: true
+ comp/otelcol/otlp/components/exporter/logsagentexporter:
+ used_by_otel: true
+ comp/otelcol/otlp/components/exporter/serializerexporter:
+ used_by_otel: true
+ comp/otelcol/otlp/components/metricsclient:
+ used_by_otel: true
+ comp/otelcol/otlp/components/processor/infraattributesprocessor:
+ used_by_otel: true
+ comp/otelcol/otlp/components/statsprocessor:
+ used_by_otel: true
+ comp/otelcol/otlp/testutil:
+ used_by_otel: true
+ comp/serializer/compression:
+ used_by_otel: true
+ comp/trace/agent/def:
+ used_by_otel: true
+ comp/trace/compression/def:
+ used_by_otel: true
+ comp/trace/compression/impl-gzip:
+ used_by_otel: true
+ comp/trace/compression/impl-zstd:
+ used_by_otel: true
+ internal/tools:
+ independent: false
+ should_tag: false
+ should_test_condition: never
+ internal/tools/independent-lint:
+ independent: false
+ should_tag: false
+ should_test_condition: never
+ internal/tools/modformatter:
+ independent: false
+ should_tag: false
+ should_test_condition: never
+ internal/tools/modparser:
+ independent: false
+ should_tag: false
+ should_test_condition: never
+ internal/tools/modparser/testdata/badformat: ignored
+ internal/tools/modparser/testdata/match: ignored
+ internal/tools/modparser/testdata/nomatch: ignored
+ internal/tools/modparser/testdata/patchgoversion: ignored
+ internal/tools/proto:
+ independent: false
+ should_tag: false
+ should_test_condition: never
+ pkg/aggregator/ckey:
+ used_by_otel: true
+ pkg/api:
+ used_by_otel: true
+ pkg/collector/check/defaults:
+ used_by_otel: true
+ pkg/config/env:
+ used_by_otel: true
+ pkg/config/mock:
+ used_by_otel: true
+ pkg/config/model:
+ used_by_otel: true
+ pkg/config/nodetreemodel:
+ used_by_otel: true
+ pkg/config/remote: default
+ pkg/config/setup:
+ used_by_otel: true
+ pkg/config/structure:
+ used_by_otel: true
+ pkg/config/teeconfig:
+ used_by_otel: true
+ pkg/config/utils:
+ used_by_otel: true
+ pkg/errors: default
+ pkg/gohai:
+ importable: false
+ pkg/linters/components/pkgconfigusage:
+ independent: false
+ should_tag: false
+ pkg/logs/auditor:
+ used_by_otel: true
+ pkg/logs/client:
+ used_by_otel: true
+ pkg/logs/diagnostic:
+ used_by_otel: true
+ pkg/logs/message:
+ used_by_otel: true
+ pkg/logs/metrics:
+ used_by_otel: true
+ pkg/logs/pipeline:
+ used_by_otel: true
+ pkg/logs/processor:
+ used_by_otel: true
+ pkg/logs/sds:
+ used_by_otel: true
+ pkg/logs/sender:
+ used_by_otel: true
+ pkg/logs/sources:
+ used_by_otel: true
+ pkg/logs/status/statusinterface:
+ used_by_otel: true
+ pkg/logs/status/utils:
+ used_by_otel: true
+ pkg/logs/util/testutils:
+ used_by_otel: true
+ pkg/metrics:
+ used_by_otel: true
+ pkg/networkdevice/profile: default
+ pkg/obfuscate:
+ used_by_otel: true
+ pkg/orchestrator/model:
+ used_by_otel: true
+ pkg/process/procutil/resources: ignored
+ pkg/process/util/api:
+ used_by_otel: true
+ pkg/proto:
+ used_by_otel: true
+ pkg/remoteconfig/state:
+ used_by_otel: true
+ pkg/security/secl: default
+ pkg/security/seclwin:
+ should_test_condition: never
+ pkg/serializer:
+ used_by_otel: true
+ pkg/status/health:
+ used_by_otel: true
+ pkg/tagger/types:
+ used_by_otel: true
+ pkg/tagset:
+ used_by_otel: true
+ pkg/telemetry:
+ used_by_otel: true
+ pkg/trace:
+ used_by_otel: true
+ pkg/trace/stats/oteltest:
+ used_by_otel: true
+ pkg/util/backoff:
+ used_by_otel: true
+ pkg/util/buf:
+ used_by_otel: true
+ pkg/util/cache: default
+ pkg/util/cgroups:
+ should_test_condition: is_linux
+ used_by_otel: true
+ pkg/util/common:
+ used_by_otel: true
+ pkg/util/containers/image:
+ used_by_otel: true
+ pkg/util/defaultpaths:
+ used_by_otel: true
+ pkg/util/executable:
+ used_by_otel: true
+ pkg/util/filesystem:
+ used_by_otel: true
+ pkg/util/flavor: default
+ pkg/util/fxutil:
+ used_by_otel: true
+ pkg/util/grpc: default
+ pkg/util/hostname/validate:
+ used_by_otel: true
+ pkg/util/http:
+ used_by_otel: true
+ pkg/util/json:
+ used_by_otel: true
+ pkg/util/log:
+ used_by_otel: true
+ pkg/util/log/setup:
+ used_by_otel: true
+ pkg/util/optional:
+ used_by_otel: true
+ pkg/util/pointer:
+ used_by_otel: true
+ pkg/util/scrubber:
+ used_by_otel: true
+ pkg/util/sort:
+ used_by_otel: true
+ pkg/util/startstop:
+ used_by_otel: true
+ pkg/util/statstracker:
+ used_by_otel: true
+ pkg/util/system:
+ used_by_otel: true
+ pkg/util/system/socket:
+ used_by_otel: true
+ pkg/util/testutil:
+ used_by_otel: true
+ pkg/util/utilizationtracker:
+ used_by_otel: true
+ pkg/util/uuid: default
+ pkg/util/winutil:
+ used_by_otel: true
+ pkg/version:
+ used_by_otel: true
+ tasks/unit_tests/testdata/go_mod_formatter/invalid_package: ignored
+ tasks/unit_tests/testdata/go_mod_formatter/valid_package: ignored
+ test/fakeintake: default
+ test/integration/serverless/recorder-extension: ignored
+ test/integration/serverless/src: ignored
+ test/new-e2e:
+ lint_targets:
+ - .
+ - ./examples
+ test_targets:
+ - ./pkg/runner
+ - ./pkg/utils/e2e/client
+ - ./system-probe/
+ test/otel:
+ used_by_otel: true
+ tools/retry_file_dump:
+ independent: false
+ should_tag: false
+ should_test_condition: never
diff --git a/omnibus/config/software/datadog-agent-finalize.rb b/omnibus/config/software/datadog-agent-finalize.rb
index 18156227f1d58..f90a8843a2dfb 100644
--- a/omnibus/config/software/datadog-agent-finalize.rb
+++ b/omnibus/config/software/datadog-agent-finalize.rb
@@ -168,9 +168,6 @@
# Most postgres binaries are removed in postgres' own software
# recipe, but we need pg_config to build psycopq.
delete "#{install_dir}/embedded/bin/pg_config"
-
- # Edit rpath from a true path to relative path for each binary
- command "inv omnibus.rpath-edit #{install_dir} #{install_dir}", cwd: Dir.pwd
end
if osx_target?
diff --git a/omnibus/config/software/datadog-agent.rb b/omnibus/config/software/datadog-agent.rb
index ee030c17aacca..ccf7da2ae1dfb 100644
--- a/omnibus/config/software/datadog-agent.rb
+++ b/omnibus/config/software/datadog-agent.rb
@@ -134,7 +134,7 @@
# Process agent
if not bundled_agents.include? "process-agent"
- command "invoke -e process-agent.build --install-path=#{install_dir} --major-version #{major_version_arg} --flavor #{flavor_arg} --no-bundle", :env => env
+ command "invoke -e process-agent.build --install-path=#{install_dir} --major-version #{major_version_arg} --flavor #{flavor_arg}", :env => env
end
if windows_target?
@@ -150,7 +150,7 @@
if windows_target?
command "invoke -e system-probe.build", env: env
elsif linux_target?
- command "invoke -e system-probe.build-sysprobe-binary --install-path=#{install_dir} --no-bundle", env: env
+ command "invoke -e system-probe.build-sysprobe-binary --install-path=#{install_dir}", env: env
end
end
@@ -173,7 +173,7 @@
secagent_support = (not heroku_target?) and (not windows_target? or (ENV['WINDOWS_DDPROCMON_DRIVER'] and not ENV['WINDOWS_DDPROCMON_DRIVER'].empty?))
if secagent_support
if not bundled_agents.include? "security-agent"
- command "invoke -e security-agent.build --install-path=#{install_dir} --major-version #{major_version_arg} --no-bundle", :env => env
+ command "invoke -e security-agent.build --install-path=#{install_dir} --major-version #{major_version_arg}", :env => env
end
if windows_target?
copy 'bin/security-agent/security-agent.exe', "#{install_dir}/bin/agent"
diff --git a/pkg/aggregator/aggregator.go b/pkg/aggregator/aggregator.go
index b1fa7fb464c5c..d42ac89d50e47 100644
--- a/pkg/aggregator/aggregator.go
+++ b/pkg/aggregator/aggregator.go
@@ -13,7 +13,7 @@ import (
"sync"
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform"
"github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags"
diff --git a/pkg/aggregator/aggregator_test.go b/pkg/aggregator/aggregator_test.go
index 2b51ce784d230..64c33496891c7 100644
--- a/pkg/aggregator/aggregator_test.go
+++ b/pkg/aggregator/aggregator_test.go
@@ -22,8 +22,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/DataDog/datadog-agent/comp/core"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform"
@@ -146,7 +145,7 @@ func TestAddServiceCheckDefaultValues(t *testing.T) {
// -
s := &MockSerializerIterableSerie{}
- taggerComponent := fxutil.Test[tagger.Mock](t, taggerimpl.MockModule())
+ taggerComponent := taggerMock.SetupFakeTagger(t)
agg := NewBufferedAggregator(s, nil, taggerComponent, "resolved-hostname", DefaultFlushInterval)
agg.addServiceCheck(servicecheck.ServiceCheck{
@@ -179,7 +178,7 @@ func TestAddEventDefaultValues(t *testing.T) {
// -
s := &MockSerializerIterableSerie{}
- taggerComponent := fxutil.Test[tagger.Mock](t, taggerimpl.MockModule())
+ taggerComponent := taggerMock.SetupFakeTagger(t)
agg := NewBufferedAggregator(s, nil, taggerComponent, "resolved-hostname", DefaultFlushInterval)
agg.addEvent(event.Event{
@@ -229,7 +228,7 @@ func TestDefaultData(t *testing.T) {
// -
s := &MockSerializerIterableSerie{}
- taggerComponent := fxutil.Test[tagger.Mock](t, taggerimpl.MockModule())
+ taggerComponent := taggerMock.SetupFakeTagger(t)
agg := NewBufferedAggregator(s, nil, taggerComponent, "hostname", DefaultFlushInterval)
start := time.Now()
@@ -584,7 +583,7 @@ func TestTags(t *testing.T) {
mockConfig := configmock.New(t)
mockConfig.SetWithoutSource("basic_telemetry_add_container_tags", tt.tlmContainerTagsEnabled)
- taggerComponent := fxutil.Test[tagger.Mock](t, taggerimpl.MockModule())
+ taggerComponent := taggerMock.SetupFakeTagger(t)
agg := NewBufferedAggregator(nil, nil, taggerComponent, tt.hostname, time.Second)
agg.agentTags = tt.agentTags
@@ -619,7 +618,7 @@ func TestAddDJMRecurrentSeries(t *testing.T) {
s := &MockSerializerIterableSerie{}
// NewBufferedAggregator with DJM enable will create a new recurrentSeries
- taggerComponent := fxutil.Test[tagger.Mock](t, taggerimpl.MockModule())
+ taggerComponent := taggerMock.SetupFakeTagger(t)
NewBufferedAggregator(s, nil, taggerComponent, "hostname", DefaultFlushInterval)
expectedRecurrentSeries := metrics.Series{&metrics.Serie{
diff --git a/pkg/aggregator/check_sampler.go b/pkg/aggregator/check_sampler.go
index 8bd0b29a68a0a..98803f20e089e 100644
--- a/pkg/aggregator/check_sampler.go
+++ b/pkg/aggregator/check_sampler.go
@@ -9,7 +9,7 @@ import (
"math"
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/ckey"
"github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags"
checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id"
diff --git a/pkg/aggregator/check_sampler_bench_test.go b/pkg/aggregator/check_sampler_bench_test.go
index 9f001097d583c..dbb6e8b57bf45 100644
--- a/pkg/aggregator/check_sampler_bench_test.go
+++ b/pkg/aggregator/check_sampler_bench_test.go
@@ -13,8 +13,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core"
"github.com/DataDog/datadog-agent/comp/core/hostname"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl"
@@ -45,7 +44,7 @@ func benchmarkAddBucket(bucketValue int64, b *testing.B) {
// For some reasons using InitAggregator[WithInterval] doesn't fix the problem,
// but this do.
deps := fxutil.Test[benchmarkDeps](b, core.MockBundle())
- taggerComponent := fxutil.Test[tagger.Mock](b, taggerimpl.MockModule())
+ taggerComponent := mock.SetupFakeTagger(b)
forwarderOpts := forwarder.NewOptionsWithResolvers(pkgconfigsetup.Datadog(), deps.Log, resolver.NewSingleDomainResolvers(map[string][]string{"hello": {"world"}}))
options := DefaultAgentDemultiplexerOptions()
options.DontStartForwarders = true
@@ -74,7 +73,7 @@ func benchmarkAddBucket(bucketValue int64, b *testing.B) {
}
func benchmarkAddBucketWideBounds(bucketValue int64, b *testing.B) {
- taggerComponent := fxutil.Test[tagger.Mock](b, taggerimpl.MockModule())
+ taggerComponent := mock.SetupFakeTagger(b)
checkSampler := newCheckSampler(1, true, true, 1000, tags.NewStore(true, "bench"), checkid.ID("hello:world:1234"), taggerComponent)
bounds := []float64{0, .0005, .001, .003, .005, .007, .01, .015, .02, .025, .03, .04, .05, .06, .07, .08, .09, .1, .5, 1, 5, 10}
diff --git a/pkg/aggregator/check_sampler_test.go b/pkg/aggregator/check_sampler_test.go
index c800ea664272c..02b7e26f568b8 100644
--- a/pkg/aggregator/check_sampler_test.go
+++ b/pkg/aggregator/check_sampler_test.go
@@ -16,7 +16,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/pkg/aggregator/ckey"
"github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags"
checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id"
@@ -27,13 +27,13 @@ import (
func generateContextKey(sample metrics.MetricSampleContext) ckey.ContextKey {
k := ckey.NewKeyGenerator()
tb := tagset.NewHashingTagsAccumulator()
- taggerComponent := nooptagger.NewTaggerClient()
+ taggerComponent := nooptagger.NewComponent()
sample.GetTags(tb, tb, taggerComponent.EnrichTags)
return k.Generate(sample.GetName(), sample.GetHost(), tb)
}
func testCheckGaugeSampling(t *testing.T, store *tags.Store) {
- taggerComponent := nooptagger.NewTaggerClient()
+ taggerComponent := nooptagger.NewComponent()
checkSampler := newCheckSampler(1, true, true, 1*time.Second, store, checkid.ID("hello:world:1234"), taggerComponent)
mSample1 := metrics.MetricSample{
@@ -97,7 +97,7 @@ func TestCheckGaugeSampling(t *testing.T) {
}
func testCheckRateSampling(t *testing.T, store *tags.Store) {
- taggerComponent := nooptagger.NewTaggerClient()
+ taggerComponent := nooptagger.NewComponent()
checkSampler := newCheckSampler(1, true, true, 1*time.Second, store, checkid.ID("hello:world:1234"), taggerComponent)
mSample1 := metrics.MetricSample{
@@ -151,7 +151,7 @@ func TestCheckRateSampling(t *testing.T) {
}
func testHistogramCountSampling(t *testing.T, store *tags.Store) {
- taggerComponent := nooptagger.NewTaggerClient()
+ taggerComponent := nooptagger.NewComponent()
checkSampler := newCheckSampler(1, true, true, 1*time.Second, store, checkid.ID("hello:world:1234"), taggerComponent)
mSample1 := metrics.MetricSample{
@@ -217,7 +217,7 @@ func TestHistogramCountSampling(t *testing.T) {
}
func testCheckHistogramBucketSampling(t *testing.T, store *tags.Store) {
- taggerComponent := nooptagger.NewTaggerClient()
+ taggerComponent := nooptagger.NewComponent()
checkSampler := newCheckSampler(1, true, true, 1*time.Second, store, checkid.ID("hello:world:1234"), taggerComponent)
bucket1 := &metrics.HistogramBucket{
@@ -295,7 +295,7 @@ func TestCheckHistogramBucketSampling(t *testing.T) {
}
func testCheckHistogramBucketDontFlushFirstValue(t *testing.T, store *tags.Store) {
- taggerComponent := nooptagger.NewTaggerClient()
+ taggerComponent := nooptagger.NewComponent()
checkSampler := newCheckSampler(1, true, true, 1*time.Second, store, checkid.ID("hello:world:1234"), taggerComponent)
bucket1 := &metrics.HistogramBucket{
@@ -351,7 +351,7 @@ func TestCheckHistogramBucketDontFlushFirstValue(t *testing.T) {
}
func testCheckHistogramBucketInfinityBucket(t *testing.T, store *tags.Store) {
- taggerComponent := nooptagger.NewTaggerClient()
+ taggerComponent := nooptagger.NewComponent()
checkSampler := newCheckSampler(1, true, true, 1*time.Second, store, checkid.ID("hello:world:1234"), taggerComponent)
bucket1 := &metrics.HistogramBucket{
@@ -387,7 +387,7 @@ func TestCheckHistogramBucketInfinityBucket(t *testing.T) {
}
func testCheckDistribution(t *testing.T, store *tags.Store) {
- taggerComponent := nooptagger.NewTaggerClient()
+ taggerComponent := nooptagger.NewComponent()
checkSampler := newCheckSampler(1, true, true, 1*time.Second, store, checkid.ID("hello:world:1234"), taggerComponent)
mSample1 := metrics.MetricSample{
diff --git a/pkg/aggregator/context_resolver.go b/pkg/aggregator/context_resolver.go
index 5db6186c1b325..997e26b2f0939 100644
--- a/pkg/aggregator/context_resolver.go
+++ b/pkg/aggregator/context_resolver.go
@@ -9,7 +9,7 @@ import (
"io"
"unsafe"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/ckey"
"github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags"
"github.com/DataDog/datadog-agent/pkg/metrics"
diff --git a/pkg/aggregator/context_resolver_bench_test.go b/pkg/aggregator/context_resolver_bench_test.go
index a89be1f263d79..3a27d91f99c2a 100644
--- a/pkg/aggregator/context_resolver_bench_test.go
+++ b/pkg/aggregator/context_resolver_bench_test.go
@@ -9,7 +9,7 @@ import (
"strconv"
"testing"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags"
"github.com/DataDog/datadog-agent/pkg/metrics"
)
@@ -27,7 +27,7 @@ func benchmarkContextResolver(numContexts int, b *testing.B) {
})
}
cache := tags.NewStore(true, "test")
- cr := newContextResolver(nooptagger.NewTaggerClient(), cache, "0")
+ cr := newContextResolver(nooptagger.NewComponent(), cache, "0")
b.ResetTimer()
for n := 0; n < b.N; n++ {
diff --git a/pkg/aggregator/context_resolver_test.go b/pkg/aggregator/context_resolver_test.go
index 6ad790c00c90e..297ab806df611 100644
--- a/pkg/aggregator/context_resolver_test.go
+++ b/pkg/aggregator/context_resolver_test.go
@@ -13,7 +13,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/pkg/aggregator/ckey"
"github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags"
"github.com/DataDog/datadog-agent/pkg/metrics"
@@ -75,7 +75,7 @@ func testTrackContext(t *testing.T, store *tags.Store) {
SampleRate: 1,
}
- contextResolver := newContextResolver(nooptagger.NewTaggerClient(), store, "test")
+ contextResolver := newContextResolver(nooptagger.NewComponent(), store, "test")
// Track the 2 contexts
contextKey1 := contextResolver.trackContext(&mSample1, 0)
@@ -139,7 +139,7 @@ func testExpireContexts(t *testing.T, store *tags.Store) {
Tags: []string{"foo"},
SampleRate: 1,
}
- contextResolver := newTimestampContextResolver(nooptagger.NewTaggerClient(), store, "test", 2, 4)
+ contextResolver := newTimestampContextResolver(nooptagger.NewComponent(), store, "test", 2, 4)
// Track the 2 contexts
contextKey1 := contextResolver.trackContext(&mSample1, 4) // expires after 6
@@ -191,7 +191,7 @@ func testCountBasedExpireContexts(t *testing.T, store *tags.Store) {
mSample1 := metrics.MetricSample{Name: "my.metric.name1"}
mSample2 := metrics.MetricSample{Name: "my.metric.name2"}
mSample3 := metrics.MetricSample{Name: "my.metric.name3"}
- contextResolver := newCountBasedContextResolver(2, store, nooptagger.NewTaggerClient(), "test")
+ contextResolver := newCountBasedContextResolver(2, store, nooptagger.NewComponent(), "test")
contextKey1 := contextResolver.trackContext(&mSample1)
contextKey2 := contextResolver.trackContext(&mSample2)
@@ -216,7 +216,7 @@ func TestCountBasedExpireContexts(t *testing.T) {
}
func testTagDeduplication(t *testing.T, store *tags.Store) {
- resolver := newContextResolver(nooptagger.NewTaggerClient(), store, "test")
+ resolver := newContextResolver(nooptagger.NewComponent(), store, "test")
ckey := resolver.trackContext(&metrics.MetricSample{
Name: "foo",
@@ -254,7 +254,7 @@ func (s *mockSample) GetTags(tb, mb tagset.TagsAccumulator, _ metrics.EnrichTags
}
func TestOriginTelemetry(t *testing.T) {
- r := newContextResolver(nooptagger.NewTaggerClient(), tags.NewStore(true, "test"), "test")
+ r := newContextResolver(nooptagger.NewComponent(), tags.NewStore(true, "test"), "test")
r.trackContext(&mockSample{"foo", []string{"foo"}, []string{"ook"}}, 0)
r.trackContext(&mockSample{"foo", []string{"foo"}, []string{"eek"}}, 0)
r.trackContext(&mockSample{"foo", []string{"bar"}, []string{"ook"}}, 0)
diff --git a/pkg/aggregator/demultiplexer_agent.go b/pkg/aggregator/demultiplexer_agent.go
index bc2e424b33199..a420537fccd25 100644
--- a/pkg/aggregator/demultiplexer_agent.go
+++ b/pkg/aggregator/demultiplexer_agent.go
@@ -13,7 +13,7 @@ import (
"time"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
forwarder "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform"
orchestratorforwarder "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator"
diff --git a/pkg/aggregator/demultiplexer_agent_test.go b/pkg/aggregator/demultiplexer_agent_test.go
index 13d75d0cfaa56..f2703abc6ec17 100644
--- a/pkg/aggregator/demultiplexer_agent_test.go
+++ b/pkg/aggregator/demultiplexer_agent_test.go
@@ -16,8 +16,8 @@ import (
"go.uber.org/fx"
"github.com/DataDog/datadog-agent/comp/core"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl"
@@ -160,7 +160,7 @@ type DemultiplexerAgentTestDeps struct {
}
func createDemultiplexerAgentTestDeps(t *testing.T) DemultiplexerAgentTestDeps {
- taggerComponent := fxutil.Test[tagger.Mock](t, taggerimpl.MockModule())
+ taggerComponent := mock.SetupFakeTagger(t)
return fxutil.Test[DemultiplexerAgentTestDeps](
t,
diff --git a/pkg/aggregator/demultiplexer_mock.go b/pkg/aggregator/demultiplexer_mock.go
index 4e9f6ea5c8d11..917a788795957 100644
--- a/pkg/aggregator/demultiplexer_mock.go
+++ b/pkg/aggregator/demultiplexer_mock.go
@@ -12,7 +12,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core/hostname"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl"
@@ -33,5 +33,5 @@ type TestDeps struct {
func InitAndStartAgentDemultiplexerForTest(deps TestDeps, options AgentDemultiplexerOptions, hostname string) *AgentDemultiplexer {
orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{})
eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(deps.Hostname))
- return InitAndStartAgentDemultiplexer(deps.Log, deps.SharedForwarder, &orchestratorForwarder, options, eventPlatformForwarder, deps.Compressor, nooptagger.NewTaggerClient(), hostname)
+ return InitAndStartAgentDemultiplexer(deps.Log, deps.SharedForwarder, &orchestratorForwarder, options, eventPlatformForwarder, deps.Compressor, nooptagger.NewComponent(), hostname)
}
diff --git a/pkg/aggregator/demultiplexer_serverless.go b/pkg/aggregator/demultiplexer_serverless.go
index fe03010e9e88b..a2429988b0cbd 100644
--- a/pkg/aggregator/demultiplexer_serverless.go
+++ b/pkg/aggregator/demultiplexer_serverless.go
@@ -12,7 +12,7 @@ import (
log "github.com/DataDog/datadog-agent/comp/core/log/def"
logimpl "github.com/DataDog/datadog-agent/comp/core/log/impl"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
forwarder "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder"
"github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl"
"github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags"
diff --git a/pkg/aggregator/demultiplexer_test.go b/pkg/aggregator/demultiplexer_test.go
index 03e668898d906..6c3dd77976bdf 100644
--- a/pkg/aggregator/demultiplexer_test.go
+++ b/pkg/aggregator/demultiplexer_test.go
@@ -14,7 +14,7 @@ import (
"go.uber.org/fx"
"github.com/DataDog/datadog-agent/comp/core"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl"
@@ -173,7 +173,7 @@ func TestDemuxFlushAggregatorToSerializer(t *testing.T) {
opts := demuxTestOptions()
opts.FlushInterval = time.Hour
deps := createDemuxDeps(t, opts, eventplatformimpl.NewDefaultParams())
- demux := initAgentDemultiplexer(deps.Log, deps.SharedForwarder, deps.OrchestratorFwd, opts, deps.EventPlatformFwd, deps.Compressor, nooptagger.NewTaggerClient(), "")
+ demux := initAgentDemultiplexer(deps.Log, deps.SharedForwarder, deps.OrchestratorFwd, opts, deps.EventPlatformFwd, deps.Compressor, nooptagger.NewComponent(), "")
demux.Aggregator().tlmContainerTagsEnabled = false
require.NotNil(demux)
require.NotNil(demux.aggregator)
@@ -300,7 +300,7 @@ func createDemuxDepsWithOrchestratorFwd(
return aggregatorDeps{
TestDeps: deps.TestDeps,
- Demultiplexer: InitAndStartAgentDemultiplexer(deps.Log, deps.SharedForwarder, deps.OrchestratorForwarder, opts, deps.Eventplatform, deps.Compressor, nooptagger.NewTaggerClient(), ""),
+ Demultiplexer: InitAndStartAgentDemultiplexer(deps.Log, deps.SharedForwarder, deps.OrchestratorForwarder, opts, deps.Eventplatform, deps.Compressor, nooptagger.NewComponent(), ""),
OrchestratorFwd: deps.OrchestratorForwarder,
EventPlatformFwd: deps.Eventplatform,
}
diff --git a/pkg/aggregator/mocksender/mocksender.go b/pkg/aggregator/mocksender/mocksender.go
index f1f9155430f8c..e4403bdc352ef 100644
--- a/pkg/aggregator/mocksender/mocksender.go
+++ b/pkg/aggregator/mocksender/mocksender.go
@@ -19,7 +19,7 @@ import (
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl"
"github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id"
@@ -42,7 +42,7 @@ func CreateDefaultDemultiplexer() *aggregator.AgentDemultiplexer {
sharedForwarder := defaultforwarder.NewDefaultForwarder(pkgconfigsetup.Datadog(), log, defaultforwarder.NewOptions(pkgconfigsetup.Datadog(), log, nil))
orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{})
eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(hostnameimpl.NewHostnameService()))
- taggerComponent := nooptagger.NewTaggerClient()
+ taggerComponent := nooptagger.NewComponent()
return aggregator.InitAndStartAgentDemultiplexer(log, sharedForwarder, &orchestratorForwarder, opts, eventPlatformForwarder, compressionimpl.NewMockCompressor(), taggerComponent, "")
}
diff --git a/pkg/aggregator/no_aggregation_stream_worker.go b/pkg/aggregator/no_aggregation_stream_worker.go
index 9f2b2cb83e98e..950322483a1a0 100644
--- a/pkg/aggregator/no_aggregation_stream_worker.go
+++ b/pkg/aggregator/no_aggregation_stream_worker.go
@@ -9,7 +9,7 @@ import (
"expvar"
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/internal/util"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/metrics"
diff --git a/pkg/aggregator/sender_test.go b/pkg/aggregator/sender_test.go
index 19b6a01b74c47..17713653a59bc 100644
--- a/pkg/aggregator/sender_test.go
+++ b/pkg/aggregator/sender_test.go
@@ -19,7 +19,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core"
"github.com/DataDog/datadog-agent/comp/core/hostname"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl"
@@ -58,7 +58,7 @@ func testDemux(log log.Component, hostname hostname.Component) *AgentDemultiplex
opts.DontStartForwarders = true
orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{})
eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(hostname))
- demux := initAgentDemultiplexer(log, NewForwarderTest(log), &orchestratorForwarder, opts, eventPlatformForwarder, compressionimpl.NewMockCompressor(), nooptagger.NewTaggerClient(), defaultHostname)
+ demux := initAgentDemultiplexer(log, NewForwarderTest(log), &orchestratorForwarder, opts, eventPlatformForwarder, compressionimpl.NewMockCompressor(), nooptagger.NewComponent(), defaultHostname)
return demux
}
diff --git a/pkg/aggregator/time_sampler.go b/pkg/aggregator/time_sampler.go
index 677cad0bc51a3..704a49d305d53 100644
--- a/pkg/aggregator/time_sampler.go
+++ b/pkg/aggregator/time_sampler.go
@@ -10,7 +10,7 @@ import (
"io"
"strconv"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/ckey"
"github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
diff --git a/pkg/aggregator/time_sampler_test.go b/pkg/aggregator/time_sampler_test.go
index 89c1db534a574..9ff6f2fd13be5 100644
--- a/pkg/aggregator/time_sampler_test.go
+++ b/pkg/aggregator/time_sampler_test.go
@@ -17,7 +17,7 @@ import (
"github.com/DataDog/opentelemetry-mapping-go/pkg/quantile"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/pkg/aggregator/ckey"
"github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags"
"github.com/DataDog/datadog-agent/pkg/metrics"
@@ -34,7 +34,7 @@ func generateSerieContextKey(serie *metrics.Serie) ckey.ContextKey {
}
func testTimeSampler(store *tags.Store) *TimeSampler {
- sampler := NewTimeSampler(TimeSamplerID(0), 10, store, nooptagger.NewTaggerClient(), "host")
+ sampler := NewTimeSampler(TimeSamplerID(0), 10, store, nooptagger.NewComponent(), "host")
return sampler
}
@@ -535,7 +535,7 @@ func TestFlushMissingContext(t *testing.T) {
}
func benchmarkTimeSampler(b *testing.B, store *tags.Store) {
- sampler := NewTimeSampler(TimeSamplerID(0), 10, store, nooptagger.NewTaggerClient(), "host")
+ sampler := NewTimeSampler(TimeSamplerID(0), 10, store, nooptagger.NewComponent(), "host")
sample := metrics.MetricSample{
Name: "my.metric.name",
diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go
index a571f1bf03c67..f88bd1b8a2dc2 100644
--- a/pkg/cli/subcommands/check/command.go
+++ b/pkg/cli/subcommands/check/command.go
@@ -39,14 +39,15 @@ import (
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
"github.com/DataDog/datadog-agent/comp/core/config"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
+ remoteagentregistry "github.com/DataDog/datadog-agent/comp/core/remoteagentregistry/def"
"github.com/DataDog/datadog-agent/comp/core/secrets"
"github.com/DataDog/datadog-agent/comp/core/settings"
"github.com/DataDog/datadog-agent/comp/core/settings/settingsimpl"
"github.com/DataDog/datadog-agent/comp/core/status"
"github.com/DataDog/datadog-agent/comp/core/status/statusimpl"
"github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ dualTaggerfx "github.com/DataDog/datadog-agent/comp/core/tagger/fx-dual"
"github.com/DataDog/datadog-agent/comp/core/telemetry"
wmcatalog "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/catalog"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
@@ -172,8 +173,7 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command {
apiimpl.Module(),
authtokenimpl.Module(),
fx.Supply(context.Background()),
- fx.Provide(tagger.NewTaggerParamsForCoreAgent),
- taggerimpl.Module(),
+ dualTaggerfx.Module(common.DualTaggerParams()),
autodiscoveryimpl.Module(),
forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithNoopForwarder())),
inventorychecksimpl.Module(),
@@ -208,6 +208,7 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command {
fx.Provide(func() server.Component { return nil }),
fx.Provide(func() replay.Component { return nil }),
fx.Provide(func() pidmap.Component { return nil }),
+ fx.Provide(func() remoteagentregistry.Component { return nil }),
getPlatformModules(),
jmxloggerimpl.Module(jmxloggerimpl.NewDisabledParams()),
diff --git a/pkg/cli/subcommands/taggerlist/command.go b/pkg/cli/subcommands/taggerlist/command.go
index 7b1e239c30574..bef2e452e728b 100644
--- a/pkg/cli/subcommands/taggerlist/command.go
+++ b/pkg/cli/subcommands/taggerlist/command.go
@@ -14,7 +14,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core"
"github.com/DataDog/datadog-agent/comp/core/config"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/api"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/api"
"github.com/DataDog/datadog-agent/pkg/api/util"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/util/flavor"
diff --git a/pkg/collector/corechecks/cluster/ksm/customresources/vpa.go b/pkg/collector/corechecks/cluster/ksm/customresources/vpa.go
index fe4489b68e37d..289492ee7bcd6 100644
--- a/pkg/collector/corechecks/cluster/ksm/customresources/vpa.go
+++ b/pkg/collector/corechecks/cluster/ksm/customresources/vpa.go
@@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
- v1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
+ vpav1beta2 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
@@ -69,7 +69,7 @@ func (f *vpaFactory) MetricFamilyGenerators() []generator.FamilyGenerator {
metric.Gauge,
basemetrics.ALPHA,
"",
- wrapVPAFunc(func(a *v1.VerticalPodAutoscaler) *metric.Family {
+ wrapVPAFunc(func(a *vpav1beta2.VerticalPodAutoscaler) *metric.Family {
annotationKeys, annotationValues := kubeMapToPrometheusLabels("annotation", a.Annotations)
return &metric.Family{
Metrics: []*metric.Metric{
@@ -88,7 +88,7 @@ func (f *vpaFactory) MetricFamilyGenerators() []generator.FamilyGenerator {
metric.Gauge,
basemetrics.ALPHA,
"",
- wrapVPAFunc(func(a *v1.VerticalPodAutoscaler) *metric.Family {
+ wrapVPAFunc(func(a *vpav1beta2.VerticalPodAutoscaler) *metric.Family {
labelKeys, labelValues := kubeMapToPrometheusLabels("label", a.Labels)
return &metric.Family{
Metrics: []*metric.Metric{
@@ -107,7 +107,7 @@ func (f *vpaFactory) MetricFamilyGenerators() []generator.FamilyGenerator {
metric.Gauge,
basemetrics.ALPHA,
"",
- wrapVPAFunc(func(a *v1.VerticalPodAutoscaler) *metric.Family {
+ wrapVPAFunc(func(a *vpav1beta2.VerticalPodAutoscaler) *metric.Family {
ms := []*metric.Metric{}
if a.Spec.UpdatePolicy == nil || a.Spec.UpdatePolicy.UpdateMode == nil {
@@ -116,11 +116,11 @@ func (f *vpaFactory) MetricFamilyGenerators() []generator.FamilyGenerator {
}
}
- for _, mode := range []v1.UpdateMode{
- v1.UpdateModeOff,
- v1.UpdateModeInitial,
- v1.UpdateModeRecreate,
- v1.UpdateModeAuto,
+ for _, mode := range []vpav1beta2.UpdateMode{
+ vpav1beta2.UpdateModeOff,
+ vpav1beta2.UpdateModeInitial,
+ vpav1beta2.UpdateModeRecreate,
+ vpav1beta2.UpdateModeAuto,
} {
var v float64
if *a.Spec.UpdatePolicy.UpdateMode == mode {
@@ -146,7 +146,7 @@ func (f *vpaFactory) MetricFamilyGenerators() []generator.FamilyGenerator {
metric.Gauge,
basemetrics.ALPHA,
"",
- wrapVPAFunc(func(a *v1.VerticalPodAutoscaler) *metric.Family {
+ wrapVPAFunc(func(a *vpav1beta2.VerticalPodAutoscaler) *metric.Family {
ms := []*metric.Metric{}
if a.Spec.ResourcePolicy == nil || a.Spec.ResourcePolicy.ContainerPolicies == nil {
return &metric.Family{
@@ -169,7 +169,7 @@ func (f *vpaFactory) MetricFamilyGenerators() []generator.FamilyGenerator {
metric.Gauge,
basemetrics.ALPHA,
"",
- wrapVPAFunc(func(a *v1.VerticalPodAutoscaler) *metric.Family {
+ wrapVPAFunc(func(a *vpav1beta2.VerticalPodAutoscaler) *metric.Family {
ms := []*metric.Metric{}
if a.Spec.ResourcePolicy == nil || a.Spec.ResourcePolicy.ContainerPolicies == nil {
return &metric.Family{
@@ -191,7 +191,7 @@ func (f *vpaFactory) MetricFamilyGenerators() []generator.FamilyGenerator {
metric.Gauge,
basemetrics.ALPHA,
"",
- wrapVPAFunc(func(a *v1.VerticalPodAutoscaler) *metric.Family {
+ wrapVPAFunc(func(a *vpav1beta2.VerticalPodAutoscaler) *metric.Family {
ms := []*metric.Metric{}
if a.Status.Recommendation == nil || a.Status.Recommendation.ContainerRecommendations == nil {
return &metric.Family{
@@ -213,7 +213,7 @@ func (f *vpaFactory) MetricFamilyGenerators() []generator.FamilyGenerator {
metric.Gauge,
basemetrics.ALPHA,
"",
- wrapVPAFunc(func(a *v1.VerticalPodAutoscaler) *metric.Family {
+ wrapVPAFunc(func(a *vpav1beta2.VerticalPodAutoscaler) *metric.Family {
ms := []*metric.Metric{}
if a.Status.Recommendation == nil || a.Status.Recommendation.ContainerRecommendations == nil {
return &metric.Family{
@@ -235,7 +235,7 @@ func (f *vpaFactory) MetricFamilyGenerators() []generator.FamilyGenerator {
metric.Gauge,
basemetrics.ALPHA,
"",
- wrapVPAFunc(func(a *v1.VerticalPodAutoscaler) *metric.Family {
+ wrapVPAFunc(func(a *vpav1beta2.VerticalPodAutoscaler) *metric.Family {
ms := []*metric.Metric{}
if a.Status.Recommendation == nil || a.Status.Recommendation.ContainerRecommendations == nil {
return &metric.Family{
@@ -256,7 +256,7 @@ func (f *vpaFactory) MetricFamilyGenerators() []generator.FamilyGenerator {
metric.Gauge,
basemetrics.ALPHA,
"",
- wrapVPAFunc(func(a *v1.VerticalPodAutoscaler) *metric.Family {
+ wrapVPAFunc(func(a *vpav1beta2.VerticalPodAutoscaler) *metric.Family {
ms := []*metric.Metric{}
if a.Status.Recommendation == nil || a.Status.Recommendation.ContainerRecommendations == nil {
return &metric.Family{
@@ -275,10 +275,10 @@ func (f *vpaFactory) MetricFamilyGenerators() []generator.FamilyGenerator {
}
func (f *vpaFactory) ExpectedType() interface{} {
- return &v1.VerticalPodAutoscaler{
+ return &vpav1beta2.VerticalPodAutoscaler{
TypeMeta: metav1.TypeMeta{
Kind: "VerticalPodAutoscaler",
- APIVersion: v1.SchemeGroupVersion.String(),
+ APIVersion: vpav1beta2.SchemeGroupVersion.String(),
},
}
}
@@ -309,9 +309,9 @@ func vpaResourcesToMetrics(containerName string, resources corev1.ResourceList)
return ms
}
-func wrapVPAFunc(f func(*v1.VerticalPodAutoscaler) *metric.Family) func(interface{}) *metric.Family {
+func wrapVPAFunc(f func(*vpav1beta2.VerticalPodAutoscaler) *metric.Family) func(interface{}) *metric.Family {
return func(obj interface{}) *metric.Family {
- vpa := obj.(*v1.VerticalPodAutoscaler)
+ vpa := obj.(*vpav1beta2.VerticalPodAutoscaler)
metricFamily := f(vpa)
targetRef := vpa.Spec.TargetRef
@@ -338,11 +338,11 @@ func (f *vpaFactory) ListWatch(customResourceClient interface{}, ns string, fiel
return &cache.ListWatch{
ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
opts.FieldSelector = fieldSelector
- return vpaClient.AutoscalingV1().VerticalPodAutoscalers(ns).List(ctx, opts)
+ return vpaClient.AutoscalingV1beta2().VerticalPodAutoscalers(ns).List(ctx, opts)
},
WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
opts.FieldSelector = fieldSelector
- return vpaClient.AutoscalingV1().VerticalPodAutoscalers(ns).Watch(ctx, opts)
+ return vpaClient.AutoscalingV1beta2().VerticalPodAutoscalers(ns).Watch(ctx, opts)
},
}
}
diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go
index cd80b4ff97f03..97638ee17d2e3 100644
--- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go
+++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go
@@ -72,7 +72,7 @@ var extendedCollectors = map[string]string{
var collectorNameReplacement = map[string]string{
// verticalpodautoscalers were removed from the built-in KSM metrics in KSM 2.9, and the changes made to
// the KSM builder in KSM 2.9 result in the detected custom resource store name being different.
- "verticalpodautoscalers": "autoscaling.k8s.io/v1, Resource=verticalpodautoscalers",
+ "verticalpodautoscalers": "autoscaling.k8s.io/v1beta2, Resource=verticalpodautoscalers",
}
var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])")
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/bundled_events.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/bundled_events.go
index b7c42d6c299f7..d8387f6be0507 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/bundled_events.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/bundled_events.go
@@ -10,7 +10,7 @@ package kubernetesapiserver
import (
v1 "k8s.io/api/core/v1"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/pkg/metrics/event"
)
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/bundled_events_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/bundled_events_test.go
index 8d25b8c803433..0bd5ba911020e 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/bundled_events_test.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/bundled_events_test.go
@@ -17,7 +17,8 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
+
"github.com/DataDog/datadog-agent/pkg/metrics/event"
)
@@ -117,7 +118,7 @@ func TestBundledEventsTransform(t *testing.T) {
},
}
- taggerInstance := taggerimpl.SetupFakeTagger(t)
+ taggerInstance := mock.SetupFakeTagger(t)
tests := []struct {
name string
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go
index 5b1c67017b1c0..6d0504f8e1d2f 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go
@@ -17,7 +17,7 @@ import (
"github.com/patrickmn/go-cache"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/kubetags"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util"
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go
index dcdd997f4242d..53fe552d08581 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go
@@ -15,15 +15,11 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/local"
- "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry"
+ mockTagger "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
- coretelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry"
- "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
"github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util"
configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
"github.com/DataDog/datadog-agent/pkg/metrics/event"
- "github.com/DataDog/datadog-agent/pkg/util/fxutil"
)
func TestGetDDAlertType(t *testing.T) {
@@ -57,10 +53,7 @@ func TestGetDDAlertType(t *testing.T) {
}
func Test_getInvolvedObjectTags(t *testing.T) {
- telemetryComponent := fxutil.Test[coretelemetry.Component](t, telemetryimpl.MockModule())
- telemetryStore := telemetry.NewStore(telemetryComponent)
- cfg := configmock.New(t)
- taggerInstance := local.NewFakeTagger(cfg, telemetryStore)
+ taggerInstance := mockTagger.New(t)
taggerInstance.SetTags(types.NewEntityID(types.KubernetesPodUID, "nginx"), "workloadmeta-kubernetes_pod", nil, []string{"additional_pod_tag:nginx"}, nil, nil)
taggerInstance.SetTags(types.NewEntityID(types.KubernetesDeployment, "workload-redis/my-deployment-1"), "workloadmeta-kubernetes_deployment", nil, []string{"deployment_tag:redis-1"}, nil, nil)
taggerInstance.SetTags(types.NewEntityID(types.KubernetesDeployment, "default/my-deployment-2"), "workloadmeta-kubernetes_deployment", nil, []string{"deployment_tag:redis-2"}, nil, nil)
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go
index 37113ed5d1ae0..7603251ab5ed4 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go
@@ -20,7 +20,7 @@ import (
v1 "k8s.io/api/core/v1"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
core "github.com/DataDog/datadog-agent/pkg/collector/corechecks"
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver_test.go
index ca421c88b97cb..3c0d44ee58824 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver_test.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver_test.go
@@ -14,7 +14,7 @@ import (
v1 "k8s.io/api/core/v1"
obj "k8s.io/apimachinery/pkg/apis/meta/v1"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
core "github.com/DataDog/datadog-agent/pkg/collector/corechecks"
"github.com/DataDog/datadog-agent/pkg/metrics/servicecheck"
@@ -83,7 +83,7 @@ func TestParseComponentStatus(t *testing.T) {
Items: nil,
}
- tagger := taggerimpl.SetupFakeTagger(t)
+ tagger := mock.SetupFakeTagger(t)
// FIXME: use the factory instead
kubeASCheck := NewKubeASCheck(core.NewCheckBase(CheckName), &KubeASConfig{}, tagger)
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle.go
index 4fc133477a85d..e574ee5601ff5 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle.go
@@ -17,7 +17,7 @@ import (
v1 "k8s.io/api/core/v1"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/pkg/metrics/event"
)
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle_test.go
index 224f2746a3c3d..0e5486f26c567 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle_test.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle_test.go
@@ -17,7 +17,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
+
"github.com/DataDog/datadog-agent/pkg/metrics/event"
)
@@ -185,7 +186,7 @@ func TestFormatEvent(t *testing.T) {
b.addEvent(ev)
}
- output, err := b.formatEvents(taggerimpl.SetupFakeTagger(t))
+ output, err := b.formatEvents(mock.SetupFakeTagger(t))
assert.Nil(t, err)
assert.Equal(t, tt.expected.Text, output.Text)
@@ -247,7 +248,7 @@ func TestEventsTagging(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
bundle := newKubernetesEventBundler("", tt.k8sEvent)
bundle.addEvent(tt.k8sEvent)
- got, err := bundle.formatEvents(taggerimpl.SetupFakeTagger(t))
+ got, err := bundle.formatEvents(mock.SetupFakeTagger(t))
assert.NoError(t, err)
assert.ElementsMatch(t, tt.expectedTags, got.Tags)
})
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift_test.go
index 9145bd72f3b24..d64c91e7513b2 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift_test.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift_test.go
@@ -17,7 +17,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
@@ -34,7 +34,7 @@ func TestReportClusterQuotas(t *testing.T) {
pkgconfigsetup.Datadog().SetWithoutSource("cluster_name", "test-cluster-name")
defer pkgconfigsetup.Datadog().SetWithoutSource("cluster_name", prevClusterName)
- tagger := taggerimpl.SetupFakeTagger(t)
+ tagger := mock.SetupFakeTagger(t)
instanceCfg := []byte("")
initCfg := []byte("")
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/stub.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/stub.go
index c43a33bfd3275..f50f78bde14f0 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/stub.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/stub.go
@@ -9,7 +9,7 @@
package kubernetesapiserver
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/pkg/collector/check"
"github.com/DataDog/datadog-agent/pkg/util/optional"
)
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go
index 9136bd9f52b4e..71b384f6270b5 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go
@@ -12,7 +12,7 @@ import (
v1 "k8s.io/api/core/v1"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/pkg/metrics/event"
"github.com/DataDog/datadog-agent/pkg/tagset"
diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events_test.go
index 36a35e21ebd4f..1d7f8b0809c4a 100644
--- a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events_test.go
+++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events_test.go
@@ -17,7 +17,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/pkg/metrics/event"
"github.com/DataDog/datadog-agent/pkg/tagset"
)
@@ -137,7 +137,7 @@ func TestUnbundledEventsTransform(t *testing.T) {
},
}
- taggerInstance := taggerimpl.SetupFakeTagger(t)
+ taggerInstance := mock.SetupFakeTagger(t)
tests := []struct {
name string
@@ -655,7 +655,7 @@ func TestUnbundledEventsTransformFiltering(t *testing.T) {
},
}
- taggerInstance := taggerimpl.SetupFakeTagger(t)
+ taggerInstance := mock.SetupFakeTagger(t)
tests := []struct {
name string
@@ -901,7 +901,7 @@ func TestUnbundledEventsTransformFiltering(t *testing.T) {
}
func TestGetTagsFromTagger(t *testing.T) {
- taggerInstance := taggerimpl.SetupFakeTagger(t)
+ taggerInstance := mock.SetupFakeTagger(t)
taggerInstance.SetGlobalTags([]string{"global:here"}, nil, nil, nil)
tests := []struct {
@@ -935,7 +935,7 @@ func TestGetTagsFromTagger(t *testing.T) {
}
func TestUnbundledEventsShouldCollect(t *testing.T) {
- taggerInstance := taggerimpl.SetupFakeTagger(t)
+ taggerInstance := mock.SetupFakeTagger(t)
tests := []struct {
name string
diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/ecs/task.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/ecs/task.go
index bff76168e2054..d87c43156d107 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/collectors/ecs/task.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/ecs/task.go
@@ -11,7 +11,7 @@ package ecs
import (
"fmt"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors"
@@ -100,7 +100,12 @@ func (t *TaskCollector) fetchContainers(rcfg *collectors.CollectorRunConfig, tas
for _, container := range task.Containers {
c, err := rcfg.WorkloadmetaStore.GetContainer(container.ID)
if err != nil {
- log.Errorc(err.Error(), orchestrator.ExtraLogContext...)
+ // ECS can create internal pause containers that are not available in the workloadmeta store.
+ // https://github.com/DataDog/datadog-agent/blob/7.58.0/pkg/util/containers/filter.go#L184
+ // It is standard for tasks running with the awsvpc network mode
+ // https://github.com/aws/amazon-ecs-agent/blob/v1.88.0/agent/api/task/task.go#L68
+ // We can ignore the error and continue as there is nothing we can do about it.
+ log.Debugc(err.Error(), orchestrator.ExtraLogContext...)
continue
}
ecsTask.Containers = append(ecsTask.Containers, c)
diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/inventory/inventory.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/inventory/inventory.go
index 0521adce52d47..6372a6ce10f25 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/collectors/inventory/inventory.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/inventory/inventory.go
@@ -12,7 +12,7 @@ import (
"fmt"
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors"
k8sCollectors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s"
diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/pod_unassigned.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/pod_unassigned.go
index 6c5bbb321b60f..de25efa0b01e4 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/pod_unassigned.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/pod_unassigned.go
@@ -9,7 +9,7 @@ package k8s
import (
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors"
diff --git a/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery_test.go b/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery_test.go
index 99dabadf1a215..5d1c524aea218 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery_test.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery_test.go
@@ -17,7 +17,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/DataDog/datadog-agent/comp/core"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock"
@@ -33,7 +33,7 @@ func TestWalkAPIResources(t *testing.T) {
workloadmetafxmock.MockModule(workloadmeta.NewParams()),
))
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
inventory := inventory.NewCollectorInventory(cfg, mockStore, fakeTagger)
provider := NewAPIServerDiscoveryProvider()
diff --git a/pkg/collector/corechecks/cluster/orchestrator/manifest_buffer_test.go b/pkg/collector/corechecks/cluster/orchestrator/manifest_buffer_test.go
index e4b8941f24808..4585bf23fdd6d 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/manifest_buffer_test.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/manifest_buffer_test.go
@@ -20,7 +20,7 @@ import (
model "github.com/DataDog/agent-payload/v5/process"
"github.com/DataDog/datadog-agent/comp/core"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock"
@@ -109,7 +109,7 @@ func getManifestBuffer(t *testing.T) *ManifestBuffer {
workloadmetafxmock.MockModule(workloadmeta.NewParams()),
))
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
orchCheck := newCheck(cfg, mockStore, fakeTagger).(*OrchestratorCheck)
mb := NewManifestBuffer(orchCheck)
diff --git a/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go b/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go
index f557174398836..8476bceed7146 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go
@@ -16,7 +16,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
configcomp "github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/corechecks/cluster/orchestrator/orchestrator_test.go b/pkg/collector/corechecks/cluster/orchestrator/orchestrator_test.go
index 9bb9340e5046f..cf6920088a0cf 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/orchestrator_test.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/orchestrator_test.go
@@ -16,7 +16,7 @@ import (
"go.uber.org/fx"
"github.com/DataDog/datadog-agent/comp/core"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock"
@@ -43,7 +43,7 @@ func newCollectorBundle(t *testing.T, chk *OrchestratorCheck) *CollectorBundle {
workloadmetafxmock.MockModule(workloadmeta.NewParams()),
))
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
bundle := &CollectorBundle{
discoverCollectors: chk.orchestratorConfig.CollectorDiscoveryEnabled,
@@ -82,7 +82,7 @@ func TestOrchestratorCheckSafeReSchedule(t *testing.T) {
workloadmetafxmock.MockModule(workloadmeta.NewParams()),
))
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
orchCheck := newCheck(cfg, mockStore, fakeTagger).(*OrchestratorCheck)
orchCheck.apiClient = cl
diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/ecs/task.go b/pkg/collector/corechecks/cluster/orchestrator/processors/ecs/task.go
index c5131f61d7cf8..06a1cf0ad6246 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/processors/ecs/task.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/processors/ecs/task.go
@@ -13,7 +13,7 @@ import (
model "github.com/DataDog/agent-payload/v5/process"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/common"
transformers "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs"
diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go
index 645aba5787c98..6f3a62dda8848 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go
@@ -14,7 +14,7 @@ import (
model "github.com/DataDog/agent-payload/v5/process"
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/tags"
taggertypes "github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod_tag_provider/clc_provider.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod_tag_provider/clc_provider.go
index 1341d8390d849..952a164a96321 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod_tag_provider/clc_provider.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod_tag_provider/clc_provider.go
@@ -9,7 +9,7 @@ package podtagprovider
import (
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/collectors"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/collectors"
taggertypes "github.com/DataDog/datadog-agent/comp/core/tagger/types"
kubernetesresourceparsers "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util/kubernetes_resource_parsers"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod_tag_provider/node_provider.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod_tag_provider/node_provider.go
index 0c80fe420a317..90202f7e61599 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod_tag_provider/node_provider.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod_tag_provider/node_provider.go
@@ -10,7 +10,7 @@ package podtagprovider
import (
corev1 "k8s.io/api/core/v1"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
taggertypes "github.com/DataDog/datadog-agent/comp/core/tagger/types"
)
diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod_tag_provider/provider.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod_tag_provider/provider.go
index 11d4465c578ba..74a9324d8e9ef 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod_tag_provider/provider.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod_tag_provider/provider.go
@@ -10,7 +10,7 @@ package podtagprovider
import (
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
taggertypes "github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
diff --git a/pkg/collector/corechecks/cluster/orchestrator/stub.go b/pkg/collector/corechecks/cluster/orchestrator/stub.go
index b2c0226f8a47c..15385f8d4b272 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/stub.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/stub.go
@@ -10,7 +10,7 @@ package orchestrator
import (
configcomp "github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/check"
"github.com/DataDog/datadog-agent/pkg/util/optional"
diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task.go
index 3b8bd25a28df6..70567bd71f69a 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task.go
@@ -19,7 +19,7 @@ import (
model "github.com/DataDog/agent-payload/v5/process"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/orchestrator"
diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task_test.go
index aac8f01a64e24..70a1b8274604f 100644
--- a/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task_test.go
+++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task_test.go
@@ -15,7 +15,7 @@ import (
model "github.com/DataDog/agent-payload/v5/process"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/util/pointer"
)
@@ -23,7 +23,7 @@ import (
func TestExtractECSTask(t *testing.T) {
now := time.Date(2024, 1, 1, 11, 1, 1, 1, time.UTC)
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
actual := ExtractECSTask(TaskWithContainers{
Task: &workloadmeta.ECSTask{
diff --git a/pkg/collector/corechecks/containerimage/check.go b/pkg/collector/corechecks/containerimage/check.go
index 7479ebd236fbb..1656d568e3632 100644
--- a/pkg/collector/corechecks/containerimage/check.go
+++ b/pkg/collector/corechecks/containerimage/check.go
@@ -13,7 +13,7 @@ import (
"gopkg.in/yaml.v2"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/corechecks/containerimage/processor.go b/pkg/collector/corechecks/containerimage/processor.go
index 8998bc5435b7f..d34bd492b1cb1 100644
--- a/pkg/collector/corechecks/containerimage/processor.go
+++ b/pkg/collector/corechecks/containerimage/processor.go
@@ -10,7 +10,7 @@ import (
"strings"
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform"
diff --git a/pkg/collector/corechecks/containerimage/processor_test.go b/pkg/collector/corechecks/containerimage/processor_test.go
index 669c17d7d0884..b6c89960d30fc 100644
--- a/pkg/collector/corechecks/containerimage/processor_test.go
+++ b/pkg/collector/corechecks/containerimage/processor_test.go
@@ -18,7 +18,7 @@ import (
"go.uber.org/atomic"
"google.golang.org/protobuf/proto"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
@@ -27,7 +27,7 @@ import (
)
func TestProcessEvents(t *testing.T) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
tests := []struct {
name string
diff --git a/pkg/collector/corechecks/containers/containerd/check.go b/pkg/collector/corechecks/containers/containerd/check.go
index 1c24fa3cdf650..ab901f4a371a1 100644
--- a/pkg/collector/corechecks/containers/containerd/check.go
+++ b/pkg/collector/corechecks/containers/containerd/check.go
@@ -19,7 +19,7 @@ import (
"gopkg.in/yaml.v2"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/corechecks/containers/containerd/check_metrics_extension.go b/pkg/collector/corechecks/containers/containerd/check_metrics_extension.go
index d2d1ad8ddaa56..ecf1fea944cc2 100644
--- a/pkg/collector/corechecks/containers/containerd/check_metrics_extension.go
+++ b/pkg/collector/corechecks/containers/containerd/check_metrics_extension.go
@@ -10,7 +10,7 @@ package containerd
import (
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
taggerUtils "github.com/DataDog/datadog-agent/comp/core/tagger/utils"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
diff --git a/pkg/collector/corechecks/containers/containerd/check_test.go b/pkg/collector/corechecks/containers/containerd/check_test.go
index 46019f8cb72e5..52efeb7161383 100644
--- a/pkg/collector/corechecks/containers/containerd/check_test.go
+++ b/pkg/collector/corechecks/containers/containerd/check_test.go
@@ -15,7 +15,7 @@ import (
"github.com/stretchr/testify/assert"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
taggerUtils "github.com/DataDog/datadog-agent/comp/core/tagger/utils"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/generic"
@@ -56,7 +56,7 @@ func TestContainerdCheckGenericPart(t *testing.T) {
// Should never been called as we are in the Docker check
generic.CreateContainerMeta("docker", "cID101"),
}
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
containersStats := map[string]mock.ContainerEntry{
"cID100": mock.GetFullSampleContainerEntry(),
diff --git a/pkg/collector/corechecks/containers/containerd/events_test.go b/pkg/collector/corechecks/containers/containerd/events_test.go
index e24a807ced4ab..9f94c2f358839 100644
--- a/pkg/collector/corechecks/containers/containerd/events_test.go
+++ b/pkg/collector/corechecks/containers/containerd/events_test.go
@@ -21,7 +21,7 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
@@ -286,7 +286,7 @@ func TestCheckEvents_PauseContainers(t *testing.T) {
// TestComputeEvents checks the conversion of Containerd events to Datadog events
func TestComputeEvents(t *testing.T) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
containerdCheck := &ContainerdCheck{
instance: &ContainerdConfig{},
CheckBase: corechecks.NewCheckBase("containerd"),
diff --git a/pkg/collector/corechecks/containers/containerd/stub.go b/pkg/collector/corechecks/containers/containerd/stub.go
index 42a7f44a99e26..f0ea5996d6349 100644
--- a/pkg/collector/corechecks/containers/containerd/stub.go
+++ b/pkg/collector/corechecks/containers/containerd/stub.go
@@ -9,7 +9,7 @@
package containerd
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/check"
"github.com/DataDog/datadog-agent/pkg/util/optional"
diff --git a/pkg/collector/corechecks/containers/cri/check.go b/pkg/collector/corechecks/containers/cri/check.go
index 38810a06bee13..3e4b6f545fe66 100644
--- a/pkg/collector/corechecks/containers/cri/check.go
+++ b/pkg/collector/corechecks/containers/cri/check.go
@@ -14,7 +14,7 @@ import (
yaml "gopkg.in/yaml.v2"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/corechecks/containers/cri/check_metrics_extension.go b/pkg/collector/corechecks/containers/cri/check_metrics_extension.go
index a07e262f0ae18..54c752cd97e99 100644
--- a/pkg/collector/corechecks/containers/cri/check_metrics_extension.go
+++ b/pkg/collector/corechecks/containers/cri/check_metrics_extension.go
@@ -10,7 +10,7 @@ package cri
import (
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/generic"
diff --git a/pkg/collector/corechecks/containers/cri/check_test.go b/pkg/collector/corechecks/containers/cri/check_test.go
index f7f052ef84da6..08fe38f7f910e 100644
--- a/pkg/collector/corechecks/containers/cri/check_test.go
+++ b/pkg/collector/corechecks/containers/cri/check_test.go
@@ -10,7 +10,7 @@ package cri
import (
"testing"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/generic"
"github.com/DataDog/datadog-agent/pkg/util/containers/cri"
@@ -37,7 +37,7 @@ func TestCriCheck(t *testing.T) {
"cID100": mock.GetFullSampleContainerEntry(),
"cID101": mock.GetFullSampleContainerEntry(),
}
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
// Inject mock processor in check
mockCri := &crimock.MockCRIClient{}
diff --git a/pkg/collector/corechecks/containers/cri/stub.go b/pkg/collector/corechecks/containers/cri/stub.go
index 66cb710de146b..9e09d380112fd 100644
--- a/pkg/collector/corechecks/containers/cri/stub.go
+++ b/pkg/collector/corechecks/containers/cri/stub.go
@@ -9,7 +9,7 @@
package cri
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/check"
"github.com/DataDog/datadog-agent/pkg/util/optional"
diff --git a/pkg/collector/corechecks/containers/docker/bundled_events.go b/pkg/collector/corechecks/containers/docker/bundled_events.go
index fc1c5aa525c79..1bcfdd92d043e 100644
--- a/pkg/collector/corechecks/containers/docker/bundled_events.go
+++ b/pkg/collector/corechecks/containers/docker/bundled_events.go
@@ -8,7 +8,7 @@
package docker
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/pkg/metrics/event"
"github.com/DataDog/datadog-agent/pkg/util/docker"
"github.com/DataDog/datadog-agent/pkg/util/log"
diff --git a/pkg/collector/corechecks/containers/docker/check.go b/pkg/collector/corechecks/containers/docker/check.go
index d66e303e3bffc..3d4ea3f0567eb 100644
--- a/pkg/collector/corechecks/containers/docker/check.go
+++ b/pkg/collector/corechecks/containers/docker/check.go
@@ -20,7 +20,7 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
diff --git a/pkg/collector/corechecks/containers/docker/check_linux_test.go b/pkg/collector/corechecks/containers/docker/check_linux_test.go
index 563696e6e73b6..4081b00e0f220 100644
--- a/pkg/collector/corechecks/containers/docker/check_linux_test.go
+++ b/pkg/collector/corechecks/containers/docker/check_linux_test.go
@@ -13,7 +13,7 @@ import (
dockerTypes "github.com/docker/docker/api/types"
dockerNetworkTypes "github.com/docker/docker/api/types/network"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/generic"
@@ -245,7 +245,7 @@ func TestDockerNetworkExtension(t *testing.T) {
dockerNetworkExtension.Process(tags, container1, mockCollector, 0)
dockerNetworkExtension.Process(tags, container2, mockCollector, 0)
dockerNetworkExtension.Process(tags, container4, mockCollector, 0)
- dockerNetworkExtension.PostProcess(nooptagger.NewTaggerClient())
+ dockerNetworkExtension.PostProcess(nooptagger.NewComponent())
// Running the custom part
dockerNetworkExtension.preRun()
diff --git a/pkg/collector/corechecks/containers/docker/check_metrics_extension.go b/pkg/collector/corechecks/containers/docker/check_metrics_extension.go
index 267d1ec5fe536..c07adb175f433 100644
--- a/pkg/collector/corechecks/containers/docker/check_metrics_extension.go
+++ b/pkg/collector/corechecks/containers/docker/check_metrics_extension.go
@@ -11,7 +11,7 @@ import (
"math"
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/generic"
diff --git a/pkg/collector/corechecks/containers/docker/check_network.go b/pkg/collector/corechecks/containers/docker/check_network.go
index ffdd3ad7ad04e..3beda817a84fb 100644
--- a/pkg/collector/corechecks/containers/docker/check_network.go
+++ b/pkg/collector/corechecks/containers/docker/check_network.go
@@ -16,7 +16,7 @@ import (
dockerTypes "github.com/docker/docker/api/types"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
taggerUtils "github.com/DataDog/datadog-agent/comp/core/tagger/utils"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
diff --git a/pkg/collector/corechecks/containers/docker/check_test.go b/pkg/collector/corechecks/containers/docker/check_test.go
index 23aa7b690336e..27206c9a51ab1 100644
--- a/pkg/collector/corechecks/containers/docker/check_test.go
+++ b/pkg/collector/corechecks/containers/docker/check_test.go
@@ -16,7 +16,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/DataDog/datadog-agent/comp/core"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
taggerUtils "github.com/DataDog/datadog-agent/comp/core/tagger/utils"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
@@ -42,7 +42,7 @@ func TestDockerCheckGenericPart(t *testing.T) {
// Should never been called as we are in the Docker check
generic.CreateContainerMeta("containerd", "cID101"),
}
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
containersStats := map[string]mock.ContainerEntry{
"cID100": mock.GetFullSampleContainerEntry(),
@@ -115,7 +115,7 @@ func TestDockerCustomPart(t *testing.T) {
mockSender := mocksender.NewMockSender(checkid.ID(t.Name()))
mockSender.SetupAcceptAll()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "e2d5394a5321d4a59497f53552a0131b2aafe64faba37f4738e78c531289fc45"), "foo", []string{"image_name:datadog/agent", "short:agent", "tag:latest"}, nil, nil, nil)
fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "b781900d227cf8d63a0922705018b66610f789644bf236cb72c8698b31383074"), "foo", []string{"image_name:datadog/agent", "short:agent", "tag:7.32.0-rc.1"}, nil, nil, nil)
fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "be2584a7d1a2a3ae9f9c688e9ce7a88991c028507fec7c70a660b705bd2a5b90"), "foo", []string{"app:foo"}, nil, nil, nil)
@@ -249,7 +249,7 @@ func TestContainersRunning(t *testing.T) {
// Define tags for 3 different containers. The first 2 have the same tags.
// The third one shares the image-related tags, but has a different
// "service" tag.
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "e2d5394a5321d4a59497f53552a0131b2aafe64faba37f4738e78c531289fc45"), "foo", []string{"image_name:datadog/agent", "short:agent", "tag:latest", "service:s1"}, nil, nil, nil)
fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "b781900d227cf8d63a0922705018b66610f789644bf236cb72c8698b31383074"), "foo", []string{"image_name:datadog/agent", "short:agent", "tag:latest", "service:s1"}, nil, nil, nil)
fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "be2584a7d1a2a3ae9f9c688e9ce7a88991c028507fec7c70a660b705bd2a5b90"), "foo", []string{"image_name:datadog/agent", "short:agent", "tag:latest", "service:s2"}, nil, nil, nil)
@@ -303,7 +303,7 @@ func TestContainersRunning(t *testing.T) {
}
func TestProcess_CPUSharesMetric(t *testing.T) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
containersMeta := []*workloadmeta.Container{
generic.CreateContainerMeta("docker", "cID100"),
diff --git a/pkg/collector/corechecks/containers/docker/eventbundle.go b/pkg/collector/corechecks/containers/docker/eventbundle.go
index c3df8b979e70e..b981ad269831c 100644
--- a/pkg/collector/corechecks/containers/docker/eventbundle.go
+++ b/pkg/collector/corechecks/containers/docker/eventbundle.go
@@ -15,7 +15,7 @@ import (
"github.com/docker/docker/api/types/events"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/pkg/metrics/event"
"github.com/DataDog/datadog-agent/pkg/util/docker"
diff --git a/pkg/collector/corechecks/containers/docker/events_test.go b/pkg/collector/corechecks/containers/docker/events_test.go
index 32399e3a53a20..f020f7bfc96fc 100644
--- a/pkg/collector/corechecks/containers/docker/events_test.go
+++ b/pkg/collector/corechecks/containers/docker/events_test.go
@@ -13,7 +13,7 @@ import (
"github.com/docker/docker/api/types/events"
"github.com/stretchr/testify/assert"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
"github.com/DataDog/datadog-agent/pkg/metrics/event"
"github.com/DataDog/datadog-agent/pkg/metrics/servicecheck"
@@ -21,7 +21,7 @@ import (
)
func TestReportExitCodes(t *testing.T) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
dockerCheck := &DockerCheck{
instance: &DockerConfig{},
@@ -125,7 +125,7 @@ func TestReportExitCodes(t *testing.T) {
}
func TestAggregateEvents(t *testing.T) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
testCases := []struct {
events []*docker.ContainerEvent
diff --git a/pkg/collector/corechecks/containers/docker/stub.go b/pkg/collector/corechecks/containers/docker/stub.go
index 12f3c764878fe..899aef9653812 100644
--- a/pkg/collector/corechecks/containers/docker/stub.go
+++ b/pkg/collector/corechecks/containers/docker/stub.go
@@ -9,7 +9,7 @@
package docker
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/check"
"github.com/DataDog/datadog-agent/pkg/util/optional"
diff --git a/pkg/collector/corechecks/containers/docker/unbundled_events.go b/pkg/collector/corechecks/containers/docker/unbundled_events.go
index e51cf2c17056c..0a9f9a264d63d 100644
--- a/pkg/collector/corechecks/containers/docker/unbundled_events.go
+++ b/pkg/collector/corechecks/containers/docker/unbundled_events.go
@@ -10,7 +10,7 @@ package docker
import (
"fmt"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/pkg/metrics/event"
"github.com/DataDog/datadog-agent/pkg/util/docker"
diff --git a/pkg/collector/corechecks/containers/docker/unbundled_events_test.go b/pkg/collector/corechecks/containers/docker/unbundled_events_test.go
index 2599f9b8b7d62..c068f4d679821 100644
--- a/pkg/collector/corechecks/containers/docker/unbundled_events_test.go
+++ b/pkg/collector/corechecks/containers/docker/unbundled_events_test.go
@@ -19,7 +19,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/pkg/metrics/event"
"github.com/DataDog/datadog-agent/pkg/util/docker"
@@ -128,7 +128,7 @@ func TestUnbundledEventsTransform(t *testing.T) {
},
}
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
for _, ev := range incomingEvents {
fakeTagger.SetTags(
diff --git a/pkg/collector/corechecks/containers/generic/check.go b/pkg/collector/corechecks/containers/generic/check.go
index cf3cacad20579..a0686f1052a1c 100644
--- a/pkg/collector/corechecks/containers/generic/check.go
+++ b/pkg/collector/corechecks/containers/generic/check.go
@@ -12,7 +12,7 @@ import (
yaml "gopkg.in/yaml.v2"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/corechecks/containers/generic/mock.go b/pkg/collector/corechecks/containers/generic/mock.go
index e14823a8578b0..5f1fbcc8de8d8 100644
--- a/pkg/collector/corechecks/containers/generic/mock.go
+++ b/pkg/collector/corechecks/containers/generic/mock.go
@@ -10,7 +10,7 @@ package generic
import (
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
"github.com/DataDog/datadog-agent/pkg/util/containers/metrics/mock"
diff --git a/pkg/collector/corechecks/containers/generic/processor.go b/pkg/collector/corechecks/containers/generic/processor.go
index 9f76abc078e85..34b06641e0fc8 100644
--- a/pkg/collector/corechecks/containers/generic/processor.go
+++ b/pkg/collector/corechecks/containers/generic/processor.go
@@ -8,7 +8,7 @@ package generic
import (
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
taggerUtils "github.com/DataDog/datadog-agent/comp/core/tagger/utils"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
diff --git a/pkg/collector/corechecks/containers/generic/processor_extension.go b/pkg/collector/corechecks/containers/generic/processor_extension.go
index e2324a69e659d..6a967dea045d8 100644
--- a/pkg/collector/corechecks/containers/generic/processor_extension.go
+++ b/pkg/collector/corechecks/containers/generic/processor_extension.go
@@ -8,7 +8,7 @@ package generic
import (
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/util/containers/metrics"
diff --git a/pkg/collector/corechecks/containers/generic/processor_network.go b/pkg/collector/corechecks/containers/generic/processor_network.go
index 9173b615fda3c..1260d57ac517f 100644
--- a/pkg/collector/corechecks/containers/generic/processor_network.go
+++ b/pkg/collector/corechecks/containers/generic/processor_network.go
@@ -8,7 +8,7 @@ package generic
import (
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
taggerUtils "github.com/DataDog/datadog-agent/comp/core/tagger/utils"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
diff --git a/pkg/collector/corechecks/containers/generic/processor_network_test.go b/pkg/collector/corechecks/containers/generic/processor_network_test.go
index 2a2ac804ed312..c05f3b1e620d9 100644
--- a/pkg/collector/corechecks/containers/generic/processor_network_test.go
+++ b/pkg/collector/corechecks/containers/generic/processor_network_test.go
@@ -8,7 +8,7 @@ package generic
import (
"testing"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
@@ -21,7 +21,7 @@ func TestNetworkProcessorExtension(t *testing.T) {
mockSender := mocksender.NewMockSender("network-extension")
mockSender.SetupAcceptAll()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
mockCollector := mock.NewCollector("testCollector")
diff --git a/pkg/collector/corechecks/containers/generic/processor_test.go b/pkg/collector/corechecks/containers/generic/processor_test.go
index 5ac5c84f4f020..ea132c70fe367 100644
--- a/pkg/collector/corechecks/containers/generic/processor_test.go
+++ b/pkg/collector/corechecks/containers/generic/processor_test.go
@@ -11,14 +11,14 @@ import (
"github.com/stretchr/testify/assert"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
taggerUtils "github.com/DataDog/datadog-agent/comp/core/tagger/utils"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/util/containers/metrics/mock"
)
func TestProcessorRunFullStatsLinux(t *testing.T) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
containersMeta := []*workloadmeta.Container{
// Container with full stats
@@ -88,7 +88,7 @@ func TestProcessorRunFullStatsLinux(t *testing.T) {
}
func TestProcessorRunPartialStats(t *testing.T) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
containersMeta := []*workloadmeta.Container{
// Container without stats
diff --git a/pkg/collector/corechecks/containers/kubelet/common/pod.go b/pkg/collector/corechecks/containers/kubelet/common/pod.go
index 6519e99ceba0b..f0689b2c852d1 100644
--- a/pkg/collector/corechecks/containers/kubelet/common/pod.go
+++ b/pkg/collector/corechecks/containers/kubelet/common/pod.go
@@ -14,7 +14,7 @@ import (
"github.com/prometheus/common/model"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/tags"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
diff --git a/pkg/collector/corechecks/containers/kubelet/kubelet.go b/pkg/collector/corechecks/containers/kubelet/kubelet.go
index f3b4056766ee1..86d7386ecdba7 100644
--- a/pkg/collector/corechecks/containers/kubelet/kubelet.go
+++ b/pkg/collector/corechecks/containers/kubelet/kubelet.go
@@ -10,7 +10,7 @@ package kubelet
import (
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider.go b/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider.go
index 96672668f4793..9f4d42a558297 100644
--- a/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider.go
+++ b/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider.go
@@ -17,8 +17,8 @@ import (
"github.com/prometheus/common/model"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/core/tagger/utils"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
diff --git a/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider_test.go b/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider_test.go
index 98ba28357154f..e8c3e3e7fbb4d 100644
--- a/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider_test.go
+++ b/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider_test.go
@@ -18,9 +18,9 @@ import (
"github.com/DataDog/datadog-agent/comp/core"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
taggertypes "github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
@@ -108,7 +108,7 @@ func (suite *ProviderTestSuite) SetupTest() {
mockSender.SetupAcceptAll()
suite.mockSender = mockSender
- fakeTagger := taggerimpl.SetupFakeTagger(suite.T())
+ fakeTagger := mock.SetupFakeTagger(suite.T())
for entity, tags := range commontesting.CommonTags {
prefix, id, _ := taggercommon.ExtractPrefixAndID(entity)
diff --git a/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider.go b/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider.go
index 5f3fe21e3d85c..02fed597b8e9a 100644
--- a/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider.go
+++ b/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider.go
@@ -15,7 +15,7 @@ import (
"github.com/prometheus/common/model"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/core/tagger/utils"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
diff --git a/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider_test.go b/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider_test.go
index bbb51a4ec18f9..818a6da7830c9 100644
--- a/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider_test.go
+++ b/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider_test.go
@@ -19,9 +19,9 @@ import (
"github.com/DataDog/datadog-agent/comp/core"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
taggertypes "github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
@@ -139,7 +139,7 @@ func (suite *ProviderTestSuite) SetupTest() {
mockSender.SetupAcceptAll()
suite.mockSender = mockSender
- fakeTagger := taggerimpl.SetupFakeTagger(suite.T())
+ fakeTagger := mock.SetupFakeTagger(suite.T())
for entity, tags := range commontesting.CommonTags {
prefix, id, _ := taggercommon.ExtractPrefixAndID(entity)
diff --git a/pkg/collector/corechecks/containers/kubelet/provider/pod/provider.go b/pkg/collector/corechecks/containers/kubelet/provider/pod/provider.go
index 2d51552af013b..738f4bd53e6cc 100644
--- a/pkg/collector/corechecks/containers/kubelet/provider/pod/provider.go
+++ b/pkg/collector/corechecks/containers/kubelet/provider/pod/provider.go
@@ -16,7 +16,7 @@ import (
"strings"
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/tags"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/core/tagger/utils"
diff --git a/pkg/collector/corechecks/containers/kubelet/provider/pod/provider_test.go b/pkg/collector/corechecks/containers/kubelet/provider/pod/provider_test.go
index 0f593fed47711..152738df97ac7 100644
--- a/pkg/collector/corechecks/containers/kubelet/provider/pod/provider_test.go
+++ b/pkg/collector/corechecks/containers/kubelet/provider/pod/provider_test.go
@@ -24,9 +24,9 @@ import (
"github.com/stretchr/testify/suite"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
taggertypes "github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id"
@@ -116,7 +116,7 @@ func (suite *ProviderTestSuite) SetupTest() {
mockSender.SetupAcceptAll()
suite.mockSender = mockSender
- fakeTagger := taggerimpl.SetupFakeTagger(suite.T())
+ fakeTagger := mock.SetupFakeTagger(suite.T())
for entity, tags := range commontesting.CommonTags {
prefix, id, _ := taggercommon.ExtractPrefixAndID(entity)
diff --git a/pkg/collector/corechecks/containers/kubelet/provider/probe/provider.go b/pkg/collector/corechecks/containers/kubelet/provider/probe/provider.go
index 13baf09751679..7c7fcdc13b9cd 100644
--- a/pkg/collector/corechecks/containers/kubelet/provider/probe/provider.go
+++ b/pkg/collector/corechecks/containers/kubelet/provider/probe/provider.go
@@ -10,7 +10,7 @@
package probe
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/core/tagger/utils"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
diff --git a/pkg/collector/corechecks/containers/kubelet/provider/probe/provider_test.go b/pkg/collector/corechecks/containers/kubelet/provider/probe/provider_test.go
index 0a7fde8493d1f..f0c7b89812eb9 100644
--- a/pkg/collector/corechecks/containers/kubelet/provider/probe/provider_test.go
+++ b/pkg/collector/corechecks/containers/kubelet/provider/probe/provider_test.go
@@ -21,7 +21,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types"
taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
taggertypes "github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
@@ -273,7 +273,7 @@ func TestProvider_Provide(t *testing.T) {
mockSender := mocksender.NewMockSender(checkid.ID(t.Name()))
mockSender.SetupAcceptAll()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
for entity, tags := range probeTags {
prefix, id, _ := taggercommon.ExtractPrefixAndID(entity)
diff --git a/pkg/collector/corechecks/containers/kubelet/provider/slis/provider_test.go b/pkg/collector/corechecks/containers/kubelet/provider/slis/provider_test.go
index 75ce2a77a09f2..0dfae0857a9ca 100644
--- a/pkg/collector/corechecks/containers/kubelet/provider/slis/provider_test.go
+++ b/pkg/collector/corechecks/containers/kubelet/provider/slis/provider_test.go
@@ -15,7 +15,7 @@ import (
"testing"
"github.com/DataDog/datadog-agent/comp/core"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock"
@@ -171,7 +171,7 @@ func TestProvider_Provide(t *testing.T) {
mockSender := mocksender.NewMockSender(checkid.ID(t.Name()))
mockSender.SetupAcceptAll()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
err = commontesting.StorePopulatedFromFile(store, tt.podsFile, common.NewPodUtils(fakeTagger))
if err != nil {
@@ -239,7 +239,7 @@ func TestProvider_DisableProvider(t *testing.T) {
mockSender := mocksender.NewMockSender(checkid.ID(t.Name()))
mockSender.SetupAcceptAll()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
err = commontesting.StorePopulatedFromFile(store, "../../testdata/pods.json", common.NewPodUtils(fakeTagger))
if err != nil {
diff --git a/pkg/collector/corechecks/containers/kubelet/provider/summary/provider.go b/pkg/collector/corechecks/containers/kubelet/provider/summary/provider.go
index c3662f9cd90a4..84d06415b6bfe 100644
--- a/pkg/collector/corechecks/containers/kubelet/provider/summary/provider.go
+++ b/pkg/collector/corechecks/containers/kubelet/provider/summary/provider.go
@@ -15,7 +15,7 @@ import (
kubeletv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/core/tagger/utils"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
diff --git a/pkg/collector/corechecks/containers/kubelet/provider/summary/provider_test.go b/pkg/collector/corechecks/containers/kubelet/provider/summary/provider_test.go
index 66e909cb9d13d..43263606c59c2 100644
--- a/pkg/collector/corechecks/containers/kubelet/provider/summary/provider_test.go
+++ b/pkg/collector/corechecks/containers/kubelet/provider/summary/provider_test.go
@@ -22,7 +22,7 @@ import (
log "github.com/DataDog/datadog-agent/comp/core/log/def"
logmock "github.com/DataDog/datadog-agent/comp/core/log/mock"
taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
taggertypes "github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
@@ -335,7 +335,7 @@ func TestProvider_Provide(t *testing.T) {
mockSender := mocksender.NewMockSender(checkid.ID(t.Name()))
mockSender.SetupAcceptAll()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
for entity, tags := range entityTags {
prefix, id, _ := taggercommon.ExtractPrefixAndID(entity)
diff --git a/pkg/collector/corechecks/containers/kubelet/stub.go b/pkg/collector/corechecks/containers/kubelet/stub.go
index 1218461201f9d..e07885b903134 100644
--- a/pkg/collector/corechecks/containers/kubelet/stub.go
+++ b/pkg/collector/corechecks/containers/kubelet/stub.go
@@ -9,7 +9,7 @@
package kubelet
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/check"
"github.com/DataDog/datadog-agent/pkg/util/optional"
diff --git a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go
index 5ad080692ca42..5120cd59d6968 100644
--- a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go
+++ b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go
@@ -20,7 +20,7 @@ import (
sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/corechecks/ebpf/oomkill/stub.go b/pkg/collector/corechecks/ebpf/oomkill/stub.go
index ba05e04befa9b..279c7d9fd304e 100644
--- a/pkg/collector/corechecks/ebpf/oomkill/stub.go
+++ b/pkg/collector/corechecks/ebpf/oomkill/stub.go
@@ -9,7 +9,7 @@
package oomkill
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/pkg/collector/check"
"github.com/DataDog/datadog-agent/pkg/util/optional"
)
diff --git a/pkg/collector/corechecks/ebpf/tcpqueuelength/stub.go b/pkg/collector/corechecks/ebpf/tcpqueuelength/stub.go
index 56da220ab3db2..03dc73e4bc15a 100644
--- a/pkg/collector/corechecks/ebpf/tcpqueuelength/stub.go
+++ b/pkg/collector/corechecks/ebpf/tcpqueuelength/stub.go
@@ -9,7 +9,7 @@
package tcpqueuelength
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/pkg/collector/check"
"github.com/DataDog/datadog-agent/pkg/util/optional"
)
diff --git a/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go b/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go
index 4458c8d21609d..18336e67a306b 100644
--- a/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go
+++ b/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go
@@ -17,7 +17,7 @@ import (
sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/corechecks/gpu/gpu.go b/pkg/collector/corechecks/gpu/gpu.go
index f0b0cb80c1ec5..e52ba27768d3a 100644
--- a/pkg/collector/corechecks/gpu/gpu.go
+++ b/pkg/collector/corechecks/gpu/gpu.go
@@ -38,11 +38,11 @@ const (
// Check represents the GPU check that will be periodically executed via the Run() function
type Check struct {
core.CheckBase
- config *CheckConfig // config for the check
- sysProbeUtil processnet.SysProbeUtil // sysProbeUtil is used to communicate with system probe
- activePIDs map[uint32]bool // activePIDs is a set of PIDs that have been seen in the current check run
- collectors []nvidia.Collector // collectors for NVML metrics
- nvmlLib nvml.Interface // NVML library interface
+ config *CheckConfig // config for the check
+ sysProbeUtil processnet.SysProbeUtil // sysProbeUtil is used to communicate with system probe
+ activeMetrics map[model.StatsKey]bool // activeMetrics is a set of metrics that have been seen in the current check run
+ collectors []nvidia.Collector // collectors for NVML metrics
+ nvmlLib nvml.Interface // NVML library interface
}
// Factory creates a new check factory
@@ -52,9 +52,9 @@ func Factory() optional.Option[func() check.Check] {
func newCheck() check.Check {
return &Check{
- CheckBase: core.NewCheckBase(CheckName),
- config: &CheckConfig{},
- activePIDs: make(map[uint32]bool),
+ CheckBase: core.NewCheckBase(CheckName),
+ config: &CheckConfig{},
+ activeMetrics: make(map[model.StatsKey]bool),
}
}
@@ -145,38 +145,46 @@ func (m *Check) emitSysprobeMetrics(snd sender.Sender) error {
return fmt.Errorf("gpu check raw data has incorrect type: %T", stats)
}
- // Set all PIDs to inactive, so we can remove the ones that we don't see
+ // Set all metrics to inactive, so we can remove the ones that we don't see
// and send the final metrics
- for pid := range m.activePIDs {
- m.activePIDs[pid] = false
+ for key := range m.activeMetrics {
+ m.activeMetrics[key] = false
}
- for pid, pidStats := range stats.ProcessStats {
- // Per-PID metrics are subject to change due to high cardinality
- tags := []string{fmt.Sprintf("pid:%d", pid)}
- snd.Gauge(metricNameUtil, pidStats.UtilizationPercentage, "", tags)
+ for _, entry := range stats.Metrics {
+ key := entry.Key
+ metrics := entry.UtilizationMetrics
+ tags := getTagsForKey(key)
+ snd.Gauge(metricNameUtil, metrics.UtilizationPercentage, "", tags)
+ snd.Gauge(metricNameMemory, float64(metrics.Memory.CurrentBytes), "", tags)
+ snd.Gauge(metricNameMaxMem, float64(metrics.Memory.MaxBytes), "", tags)
- snd.Gauge(metricNameMemory, float64(pidStats.Memory.CurrentBytes), "", tags)
- snd.Gauge(metricNameMaxMem, float64(pidStats.Memory.MaxBytes), "", tags)
-
- m.activePIDs[pid] = true
+ m.activeMetrics[key] = true
}
// Remove the PIDs that we didn't see in this check
- for pid, active := range m.activePIDs {
+ for key, active := range m.activeMetrics {
if !active {
- tags := []string{fmt.Sprintf("pid:%d", pid)}
+ tags := getTagsForKey(key)
snd.Gauge(metricNameMemory, 0, "", tags)
snd.Gauge(metricNameMaxMem, 0, "", tags)
snd.Gauge(metricNameUtil, 0, "", tags)
- delete(m.activePIDs, pid)
+ delete(m.activeMetrics, key)
}
}
return nil
}
+func getTagsForKey(key model.StatsKey) []string {
+ // Per-PID metrics are subject to change due to high cardinality
+ return []string{
+ fmt.Sprintf("pid:%d", key.PID),
+ fmt.Sprintf("gpu_uuid:%s", key.DeviceUUID),
+ }
+}
+
func (m *Check) emitNvmlMetrics(snd sender.Sender) error {
var err error
diff --git a/pkg/collector/corechecks/gpu/model/model.go b/pkg/collector/corechecks/gpu/model/model.go
index b963a5d763c0e..c936639ae2f79 100644
--- a/pkg/collector/corechecks/gpu/model/model.go
+++ b/pkg/collector/corechecks/gpu/model/model.go
@@ -7,20 +7,35 @@
// the gpu core agent check
package model
-// MemoryStats contains the memory stats for a given memory type
-type MemoryStats struct {
+// MemoryMetrics contains the memory stats for a given memory type
+type MemoryMetrics struct {
CurrentBytes uint64 `json:"current_bytes"`
MaxBytes uint64 `json:"max_bytes"`
}
-// ProcessStats contains the GPU stats for a given PID
-type ProcessStats struct {
- UtilizationPercentage float64 `json:"utilization_percentage"`
- Memory MemoryStats `json:"memory"`
+// UtilizationMetrics contains the GPU stats for a given device and process
+type UtilizationMetrics struct {
+ UtilizationPercentage float64 `json:"utilization_percentage"`
+ Memory MemoryMetrics `json:"memory"`
}
-// GPUStats contains the past and current data for all streams, including kernel spans and allocations.
-// This is the data structure that is sent to the agent
+// StatsKey is the key used to identify a GPUStats object
+type StatsKey struct {
+ // PID is the process ID
+ PID uint32 `json:"pid"`
+
+ // DeviceUUID is the UUID of the device
+ DeviceUUID string `json:"device_uuid"`
+}
+
+// StatsTuple is a single entry in the GPUStats array, as we cannot use a complex key in the map
+type StatsTuple struct {
+ Key StatsKey
+ UtilizationMetrics UtilizationMetrics
+}
+
+// GPUStats contains the statistics generated by the system-probe GPU module, which are sent to the core agent.
+// Contains an array of StatsTuple, where each entry is a tuple of a StatsKey and the corresponding UtilizationMetrics
type GPUStats struct {
- ProcessStats map[uint32]ProcessStats `json:"process_stats"`
+ Metrics []StatsTuple `json:"metrics"`
}
diff --git a/pkg/collector/corechecks/loader.go b/pkg/collector/corechecks/loader.go
index e25026364abb3..b5aa6de4e8cc9 100644
--- a/pkg/collector/corechecks/loader.go
+++ b/pkg/collector/corechecks/loader.go
@@ -10,7 +10,7 @@ import (
"fmt"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/corechecks/networkpath/networkpath.go b/pkg/collector/corechecks/networkpath/networkpath.go
index 31040c24b68b2..28bef69f2bbca 100644
--- a/pkg/collector/corechecks/networkpath/networkpath.go
+++ b/pkg/collector/corechecks/networkpath/networkpath.go
@@ -23,6 +23,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/networkpath/metricsender"
"github.com/DataDog/datadog-agent/pkg/networkpath/payload"
"github.com/DataDog/datadog-agent/pkg/networkpath/telemetry"
+ "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config"
"github.com/DataDog/datadog-agent/pkg/util/log"
"github.com/DataDog/datadog-agent/pkg/util/optional"
@@ -50,7 +51,7 @@ func (c *Check) Run() error {
}
metricSender := metricsender.NewMetricSenderAgent(senderInstance)
- cfg := traceroute.Config{
+ cfg := config.Config{
DestHostname: c.config.DestHostname,
DestPort: c.config.DestPort,
MaxTTL: c.config.MaxTTL,
diff --git a/pkg/collector/corechecks/orchestrator/ecs/ecs.go b/pkg/collector/corechecks/orchestrator/ecs/ecs.go
index 96213cfabe7c8..2db68516a85e1 100644
--- a/pkg/collector/corechecks/orchestrator/ecs/ecs.go
+++ b/pkg/collector/corechecks/orchestrator/ecs/ecs.go
@@ -24,7 +24,7 @@ import (
model "github.com/DataDog/agent-payload/v5/process"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/corechecks/orchestrator/ecs/ecs_test.go b/pkg/collector/corechecks/orchestrator/ecs/ecs_test.go
index b8c72fc47a3f1..d040257168e0b 100644
--- a/pkg/collector/corechecks/orchestrator/ecs/ecs_test.go
+++ b/pkg/collector/corechecks/orchestrator/ecs/ecs_test.go
@@ -16,7 +16,7 @@ import (
"github.com/DataDog/agent-payload/v5/process"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs"
@@ -166,7 +166,7 @@ func prepareTest(t *testing.T, v4 bool, env string) (*Check, *fakeWorkloadmetaSt
systemInfo, _ := checks.CollectSystemInfo()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
c := &Check{
sender: sender,
diff --git a/pkg/collector/corechecks/orchestrator/ecs/stub.go b/pkg/collector/corechecks/orchestrator/ecs/stub.go
index 20b811e2f6288..153401d5fbc49 100644
--- a/pkg/collector/corechecks/orchestrator/ecs/stub.go
+++ b/pkg/collector/corechecks/orchestrator/ecs/stub.go
@@ -9,7 +9,7 @@
package ecs
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/check"
"github.com/DataDog/datadog-agent/pkg/util/optional"
diff --git a/pkg/collector/corechecks/orchestrator/pod/pod.go b/pkg/collector/corechecks/orchestrator/pod/pod.go
index 181383cbf8f2d..97e92b617d647 100644
--- a/pkg/collector/corechecks/orchestrator/pod/pod.go
+++ b/pkg/collector/corechecks/orchestrator/pod/pod.go
@@ -18,7 +18,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/corechecks/orchestrator/pod/pod_test.go b/pkg/collector/corechecks/orchestrator/pod/pod_test.go
index 35a928e017c6f..62ca9c12df7e9 100644
--- a/pkg/collector/corechecks/orchestrator/pod/pod_test.go
+++ b/pkg/collector/corechecks/orchestrator/pod/pod_test.go
@@ -25,8 +25,7 @@ import (
"github.com/DataDog/agent-payload/v5/process"
"github.com/DataDog/datadog-agent/comp/core"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock"
@@ -114,7 +113,7 @@ type PodTestSuite struct {
testServer *httptest.Server
sender *fakeSender
kubeUtil kubelet.KubeUtilInterface
- tagger tagger.Mock
+ tagger mock.Mock
}
func (suite *PodTestSuite) SetupSuite() {
@@ -147,7 +146,7 @@ func (suite *PodTestSuite) SetupSuite() {
workloadmetafxmock.MockModule(workloadmeta.NewParams()),
))
- fakeTagger := taggerimpl.SetupFakeTagger(suite.T())
+ fakeTagger := mock.SetupFakeTagger(suite.T())
suite.tagger = fakeTagger
suite.check = &Check{
diff --git a/pkg/collector/corechecks/orchestrator/pod/stub.go b/pkg/collector/corechecks/orchestrator/pod/stub.go
index 74cb3cfb7ca96..556d236381204 100644
--- a/pkg/collector/corechecks/orchestrator/pod/stub.go
+++ b/pkg/collector/corechecks/orchestrator/pod/stub.go
@@ -10,7 +10,7 @@ package pod
import (
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/check"
"github.com/DataDog/datadog-agent/pkg/util/optional"
diff --git a/pkg/collector/corechecks/sbom/check.go b/pkg/collector/corechecks/sbom/check.go
index 25367efdd1614..b646b5a51e55e 100644
--- a/pkg/collector/corechecks/sbom/check.go
+++ b/pkg/collector/corechecks/sbom/check.go
@@ -16,7 +16,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/corechecks/sbom/check_no_trivy.go b/pkg/collector/corechecks/sbom/check_no_trivy.go
index 50bfcfa91061b..752d65e0217e7 100644
--- a/pkg/collector/corechecks/sbom/check_no_trivy.go
+++ b/pkg/collector/corechecks/sbom/check_no_trivy.go
@@ -9,7 +9,7 @@ package sbom
import (
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/collector/check"
"github.com/DataDog/datadog-agent/pkg/util/optional"
diff --git a/pkg/collector/corechecks/sbom/check_test.go b/pkg/collector/corechecks/sbom/check_test.go
index 9fe71d0f23099..0a4a075cff975 100644
--- a/pkg/collector/corechecks/sbom/check_test.go
+++ b/pkg/collector/corechecks/sbom/check_test.go
@@ -14,7 +14,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock"
@@ -110,7 +110,7 @@ host_heartbeat_validity_seconds: 1000000
func TestFactory(t *testing.T) {
cfg := config.NewMock(t)
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
mockStore := fxutil.Test[workloadmetamock.Mock](t, fx.Options(
core.MockBundle(),
workloadmetafxmock.MockModule(workloadmeta.NewParams()),
@@ -170,7 +170,7 @@ func TestConfigure(t *testing.T) {
InitHelper: common.GetWorkloadmetaInit(),
}),
))
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
cfg := app.Cfg
mockStore := app.Store
diff --git a/pkg/collector/corechecks/sbom/processor.go b/pkg/collector/corechecks/sbom/processor.go
index 2040b86a7fd96..0be97cf271cb3 100644
--- a/pkg/collector/corechecks/sbom/processor.go
+++ b/pkg/collector/corechecks/sbom/processor.go
@@ -13,7 +13,7 @@ import (
"strings"
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform"
diff --git a/pkg/collector/corechecks/sbom/processor_test.go b/pkg/collector/corechecks/sbom/processor_test.go
index 03c721b136194..4fcc1c3ba770e 100644
--- a/pkg/collector/corechecks/sbom/processor_test.go
+++ b/pkg/collector/corechecks/sbom/processor_test.go
@@ -27,7 +27,7 @@ import (
configcomp "github.com/DataDog/datadog-agent/comp/core/config"
log "github.com/DataDog/datadog-agent/comp/core/log/def"
logmock "github.com/DataDog/datadog-agent/comp/core/log/mock"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock"
@@ -626,7 +626,7 @@ func TestProcessEvents(t *testing.T) {
SBOMsSent.Inc()
})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
// Define a max size of 1 for the queue. With a size > 1, it's difficult to
// control the number of events sent on each call.
diff --git a/pkg/collector/corechecks/servicediscovery/apm/detect.go b/pkg/collector/corechecks/servicediscovery/apm/detect.go
index f90a16448e48c..f342f955e2274 100644
--- a/pkg/collector/corechecks/servicediscovery/apm/detect.go
+++ b/pkg/collector/corechecks/servicediscovery/apm/detect.go
@@ -10,7 +10,6 @@ package apm
import (
"bufio"
- "debug/elf"
"io"
"io/fs"
"os"
@@ -24,6 +23,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/network/go/bininspect"
"github.com/DataDog/datadog-agent/pkg/util/kernel"
"github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
// Instrumentation represents the state of APM instrumentation for a service.
@@ -100,7 +100,7 @@ const (
func goDetector(ctx usm.DetectionContext) Instrumentation {
exePath := kernel.HostProc(strconv.Itoa(ctx.Pid), "exe")
- elfFile, err := elf.Open(exePath)
+ elfFile, err := safeelf.Open(exePath)
if err != nil {
log.Debugf("Unable to open exe %s: %v", exePath, err)
return None
diff --git a/pkg/collector/corechecks/snmp/integration_profile_bundle_test.go b/pkg/collector/corechecks/snmp/integration_profile_bundle_test.go
index 519e8a5ee8e7f..720f4f713a35b 100644
--- a/pkg/collector/corechecks/snmp/integration_profile_bundle_test.go
+++ b/pkg/collector/corechecks/snmp/integration_profile_bundle_test.go
@@ -13,7 +13,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
@@ -25,7 +25,7 @@ import (
func TestProfileBundleJsonZip(t *testing.T) {
timeNow = common.MockTimeNow
- aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewTaggerClient(), "", 1*time.Hour)
+ aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewComponent(), "", 1*time.Hour)
invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "zipprofiles.d"))
pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath)
diff --git a/pkg/collector/corechecks/snmp/integration_profile_metadata_test.go b/pkg/collector/corechecks/snmp/integration_profile_metadata_test.go
index 22cb6e3f26c88..06b55c7df2180 100644
--- a/pkg/collector/corechecks/snmp/integration_profile_metadata_test.go
+++ b/pkg/collector/corechecks/snmp/integration_profile_metadata_test.go
@@ -18,7 +18,7 @@ import (
"github.com/stretchr/testify/mock"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
@@ -32,7 +32,7 @@ import (
func TestProfileMetadata_f5(t *testing.T) {
timeNow = common.MockTimeNow
- aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewTaggerClient(), "", 1*time.Hour)
+ aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewComponent(), "", 1*time.Hour)
invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "metadata.d"))
pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath)
diff --git a/pkg/collector/corechecks/snmp/integration_topology_test.go b/pkg/collector/corechecks/snmp/integration_topology_test.go
index 65915af08a005..0f5ccfbc2e5f4 100644
--- a/pkg/collector/corechecks/snmp/integration_topology_test.go
+++ b/pkg/collector/corechecks/snmp/integration_topology_test.go
@@ -18,7 +18,7 @@ import (
"github.com/stretchr/testify/mock"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
@@ -32,7 +32,7 @@ import (
func TestTopologyPayload_LLDP(t *testing.T) {
timeNow = common.MockTimeNow
- aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewTaggerClient(), "", 1*time.Hour)
+ aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewComponent(), "", 1*time.Hour)
invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "metadata.d"))
pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath)
@@ -734,7 +734,7 @@ profiles:
func TestTopologyPayload_CDP(t *testing.T) {
timeNow = common.MockTimeNow
- aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewTaggerClient(), "", 1*time.Hour)
+ aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewComponent(), "", 1*time.Hour)
invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "metadata.d"))
pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath)
@@ -1427,7 +1427,7 @@ profiles:
// we have different data for LLDP and CDP to test that we're only using LLDP to build the links
func TestTopologyPayload_LLDP_CDP(t *testing.T) {
timeNow = common.MockTimeNow
- aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewTaggerClient(), "", 1*time.Hour)
+ aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewComponent(), "", 1*time.Hour)
invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "metadata.d"))
pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath)
diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go b/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go
index cffed1f43f099..997aadd210390 100644
--- a/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go
+++ b/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go
@@ -14,7 +14,7 @@ import (
"github.com/stretchr/testify/assert"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/pkg/aggregator"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
@@ -25,7 +25,7 @@ import (
func TestConfigurations(t *testing.T) {
profile.SetConfdPathAndCleanProfiles()
- aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewTaggerClient(), "", 1*time.Hour)
+ aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewComponent(), "", 1*time.Hour)
// language=yaml
rawInstanceConfig := []byte(`
@@ -326,7 +326,7 @@ profiles:
func TestInlineProfileConfiguration(t *testing.T) {
profile.SetConfdPathAndCleanProfiles()
- aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewTaggerClient(), "", 1*time.Hour)
+ aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewComponent(), "", 1*time.Hour)
// language=yaml
rawInstanceConfig := []byte(`
diff --git a/pkg/collector/corechecks/systemd/systemd_test.go b/pkg/collector/corechecks/systemd/systemd_test.go
index e473b47a8875a..6f91dc4edd2fe 100644
--- a/pkg/collector/corechecks/systemd/systemd_test.go
+++ b/pkg/collector/corechecks/systemd/systemd_test.go
@@ -20,7 +20,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
"github.com/DataDog/datadog-agent/comp/metadata/inventorychecks/inventorychecksimpl"
"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
@@ -1087,7 +1087,7 @@ unit_names:
func TestCheckID(t *testing.T) {
check1 := newCheck()
check2 := newCheck()
- aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewTaggerClient(), "", 1*time.Hour)
+ aggregator.NewBufferedAggregator(nil, nil, nooptagger.NewComponent(), "", 1*time.Hour)
// language=yaml
rawInstanceConfig1 := []byte(`
diff --git a/pkg/collector/loaders/loaders.go b/pkg/collector/loaders/loaders.go
index 668a7635d15c6..93a2fd8c0b126 100644
--- a/pkg/collector/loaders/loaders.go
+++ b/pkg/collector/loaders/loaders.go
@@ -10,7 +10,7 @@ import (
"sort"
"sync"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/loaders/loaders_test.go b/pkg/collector/loaders/loaders_test.go
index 3f98d7e6b0f85..026be454683d6 100644
--- a/pkg/collector/loaders/loaders_test.go
+++ b/pkg/collector/loaders/loaders_test.go
@@ -13,8 +13,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
@@ -77,7 +77,7 @@ func TestLoaderCatalog(t *testing.T) {
RegisterLoader(30, factory3)
senderManager := mocksender.CreateDefaultDemultiplexer()
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
require.Len(t, LoaderCatalog(senderManager, logReceiver, tagger), 2)
assert.Equal(t, l1, LoaderCatalog(senderManager, logReceiver, tagger)[1])
assert.Equal(t, l2, LoaderCatalog(senderManager, logReceiver, tagger)[0])
diff --git a/pkg/collector/python/check_context.go b/pkg/collector/python/check_context.go
index f2353de933eb1..78344ee06eb65 100644
--- a/pkg/collector/python/check_context.go
+++ b/pkg/collector/python/check_context.go
@@ -11,7 +11,7 @@ import (
"errors"
"sync"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/util/log"
diff --git a/pkg/collector/python/loader.go b/pkg/collector/python/loader.go
index 4d72ccb7d3b54..2135295ec22a6 100644
--- a/pkg/collector/python/loader.go
+++ b/pkg/collector/python/loader.go
@@ -18,7 +18,7 @@ import (
"github.com/mohae/deepcopy"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def"
"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
diff --git a/pkg/collector/python/test_aggregator.go b/pkg/collector/python/test_aggregator.go
index 3864b5e1ee579..66d39d003d8c6 100644
--- a/pkg/collector/python/test_aggregator.go
+++ b/pkg/collector/python/test_aggregator.go
@@ -10,8 +10,8 @@ package python
import (
"testing"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
@@ -27,7 +27,7 @@ import "C"
func testSubmitMetric(t *testing.T) {
sender := mocksender.NewMockSender(checkid.ID("testID"))
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger)
defer release()
@@ -104,7 +104,7 @@ func testSubmitMetric(t *testing.T) {
func testSubmitMetricEmptyTags(t *testing.T) {
sender := mocksender.NewMockSender(checkid.ID("testID"))
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger)
defer release()
@@ -125,7 +125,7 @@ func testSubmitMetricEmptyTags(t *testing.T) {
func testSubmitMetricEmptyHostname(t *testing.T) {
sender := mocksender.NewMockSender(checkid.ID("testID"))
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger)
defer release()
@@ -146,7 +146,7 @@ func testSubmitMetricEmptyHostname(t *testing.T) {
func testSubmitServiceCheck(t *testing.T) {
sender := mocksender.NewMockSender(checkid.ID("testID"))
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger)
defer release()
@@ -166,7 +166,7 @@ func testSubmitServiceCheck(t *testing.T) {
func testSubmitServiceCheckEmptyTag(t *testing.T) {
sender := mocksender.NewMockSender(checkid.ID("testID"))
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger)
defer release()
@@ -186,7 +186,7 @@ func testSubmitServiceCheckEmptyTag(t *testing.T) {
func testSubmitServiceCheckEmptyHostame(t *testing.T) {
sender := mocksender.NewMockSender(checkid.ID("testID"))
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger)
defer release()
@@ -206,7 +206,7 @@ func testSubmitServiceCheckEmptyHostame(t *testing.T) {
func testSubmitEvent(t *testing.T) {
sender := mocksender.NewMockSender(checkid.ID("testID"))
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger)
defer release()
@@ -244,7 +244,7 @@ func testSubmitEvent(t *testing.T) {
func testSubmitHistogramBucket(t *testing.T) {
sender := mocksender.NewMockSender(checkid.ID("testID"))
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger)
defer release()
@@ -269,7 +269,7 @@ func testSubmitHistogramBucket(t *testing.T) {
func testSubmitEventPlatformEvent(t *testing.T) {
sender := mocksender.NewMockSender("testID")
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger)
defer release()
diff --git a/pkg/collector/python/test_loader.go b/pkg/collector/python/test_loader.go
index 2c349f91dc26b..77233e4e2cb36 100644
--- a/pkg/collector/python/test_loader.go
+++ b/pkg/collector/python/test_loader.go
@@ -12,7 +12,7 @@ import (
"testing"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
"github.com/DataDog/datadog-agent/pkg/util/optional"
@@ -143,7 +143,7 @@ func testLoadCustomCheck(t *testing.T) {
defer func() { rtloader = nil }()
senderManager := mocksender.CreateDefaultDemultiplexer()
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
loader, err := NewPythonCheckLoader(senderManager, logReceiver, tagger)
assert.Nil(t, err)
@@ -182,7 +182,7 @@ func testLoadWheelCheck(t *testing.T) {
senderManager := mocksender.CreateDefaultDemultiplexer()
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := nooptagger.NewTaggerClient()
+ tagger := nooptagger.NewComponent()
loader, err := NewPythonCheckLoader(senderManager, logReceiver, tagger)
assert.Nil(t, err)
diff --git a/pkg/collector/python/test_tagger.go b/pkg/collector/python/test_tagger.go
index 4c320e63201c3..a050437874d51 100644
--- a/pkg/collector/python/test_tagger.go
+++ b/pkg/collector/python/test_tagger.go
@@ -14,7 +14,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
@@ -42,7 +42,7 @@ import "C"
func testTags(t *testing.T) {
sender := mocksender.NewMockSender(checkid.ID("testID"))
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := taggerimpl.SetupFakeTagger(t)
+ tagger := mock.SetupFakeTagger(t)
tagger.SetTags(types.NewEntityID(types.ContainerID, "test"), "foo", []string{"tag1", "tag2", "tag3"}, nil, nil, nil)
release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger)
defer release()
@@ -65,7 +65,7 @@ func testTags(t *testing.T) {
func testTagsNull(t *testing.T) {
sender := mocksender.NewMockSender(checkid.ID("testID"))
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := taggerimpl.SetupFakeTagger(t)
+ tagger := mock.SetupFakeTagger(t)
tagger.SetTags(types.NewEntityID(types.ContainerID, "test"), "foo", nil, nil, nil, nil)
release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger)
defer release()
@@ -80,7 +80,7 @@ func testTagsNull(t *testing.T) {
func testTagsEmpty(t *testing.T) {
sender := mocksender.NewMockSender(checkid.ID("testID"))
logReceiver := optional.NewNoneOption[integrations.Component]()
- tagger := taggerimpl.SetupFakeTagger(t)
+ tagger := mock.SetupFakeTagger(t)
tagger.SetTags(types.NewEntityID(types.ContainerID, "test"), "foo", []string{}, nil, nil, nil)
release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger)
defer release()
diff --git a/pkg/collector/scheduler.go b/pkg/collector/scheduler.go
index 724bffe0e722c..1e2d76d6fd767 100644
--- a/pkg/collector/scheduler.go
+++ b/pkg/collector/scheduler.go
@@ -17,7 +17,7 @@ import (
"github.com/DataDog/datadog-agent/comp/collector/collector"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/collector/check"
diff --git a/pkg/collector/worker/worker.go b/pkg/collector/worker/worker.go
index 7a366434e317a..00a0b40668135 100644
--- a/pkg/collector/worker/worker.go
+++ b/pkg/collector/worker/worker.go
@@ -20,6 +20,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/telemetry"
"github.com/DataDog/datadog-agent/pkg/util/hostname"
"github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/util/utilizationtracker"
)
const (
@@ -122,7 +123,8 @@ func newWorkerWithOptions(
func (w *Worker) Run() {
log.Debugf("Runner %d, worker %d: Ready to process checks...", w.runnerID, w.ID)
- utilizationTracker := NewUtilizationTracker(w.Name, w.utilizationTickInterval)
+ alpha := 0.25 // converges to 99.98% of constant input in 30 iterations.
+ utilizationTracker := utilizationtracker.NewUtilizationTracker(w.utilizationTickInterval, alpha)
defer utilizationTracker.Stop()
startUtilizationUpdater(w.Name, utilizationTracker)
@@ -146,12 +148,12 @@ func (w *Worker) Run() {
expvars.AddRunningCheckCount(1)
expvars.SetRunningStats(check.ID(), checkStartTime)
- utilizationTracker.CheckStarted()
+ utilizationTracker.Started()
// Run the check
checkErr := check.Run()
- utilizationTracker.CheckFinished()
+ utilizationTracker.Finished()
expvars.DeleteRunningStats(check.ID())
@@ -210,7 +212,7 @@ func (w *Worker) Run() {
log.Debugf("Runner %d, worker %d: Finished processing checks.", w.runnerID, w.ID)
}
-func startUtilizationUpdater(name string, ut *UtilizationTracker) {
+func startUtilizationUpdater(name string, ut *utilizationtracker.UtilizationTracker) {
expvars.SetWorkerStats(name, &expvars.WorkerStats{
Utilization: 0.0,
})
@@ -229,7 +231,7 @@ func startUtilizationUpdater(name string, ut *UtilizationTracker) {
}()
}
-func startTrackerTicker(ut *UtilizationTracker, interval time.Duration) func() {
+func startTrackerTicker(ut *utilizationtracker.UtilizationTracker, interval time.Duration) func() {
ticker := time.NewTicker(interval)
cancel := make(chan struct{}, 1)
done := make(chan struct{})
diff --git a/pkg/commonchecks/corechecks.go b/pkg/commonchecks/corechecks.go
index dbd67603cbb5b..a30c33fbfbc41 100644
--- a/pkg/commonchecks/corechecks.go
+++ b/pkg/commonchecks/corechecks.go
@@ -8,7 +8,7 @@ package commonchecks
import (
"github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/telemetry"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
corecheckLoader "github.com/DataDog/datadog-agent/pkg/collector/corechecks"
diff --git a/pkg/config/nodetreemodel/config.go b/pkg/config/nodetreemodel/config.go
index edb7cc5082199..1c794c70829db 100644
--- a/pkg/config/nodetreemodel/config.go
+++ b/pkg/config/nodetreemodel/config.go
@@ -21,8 +21,8 @@ import (
"github.com/DataDog/viper"
"github.com/mohae/deepcopy"
- "github.com/spf13/afero"
"go.uber.org/atomic"
+ "golang.org/x/exp/maps"
"golang.org/x/exp/slices"
"github.com/DataDog/datadog-agent/pkg/config/model"
@@ -390,25 +390,20 @@ func (c *ntmConfig) ParseEnvAsSlice(key string, fn func(string) []interface{}) {
c.envTransform[strings.ToLower(key)] = func(k string) interface{} { return fn(k) }
}
-// SetFs assigns a filesystem to the config
-func (c *ntmConfig) SetFs(fs afero.Fs) {
- c.Lock()
- defer c.Unlock()
- c.noimpl.SetFs(fs)
-}
-
// IsSet checks if a key is set in the config
func (c *ntmConfig) IsSet(key string) bool {
c.RLock()
defer c.RUnlock()
- return c.noimpl.IsSet(key)
+
+ return c.IsKnown(key)
}
// AllKeysLowercased returns all keys lower-cased
func (c *ntmConfig) AllKeysLowercased() []string {
c.RLock()
defer c.RUnlock()
- return c.noimpl.AllKeys()
+
+ return maps.Keys(c.knownKeys)
}
func (c *ntmConfig) leafAtPath(key string) LeafNode {
diff --git a/pkg/config/nodetreemodel/config_test.go b/pkg/config/nodetreemodel/config_test.go
index a3d00a412af48..86b969b807c73 100644
--- a/pkg/config/nodetreemodel/config_test.go
+++ b/pkg/config/nodetreemodel/config_test.go
@@ -8,6 +8,7 @@ package nodetreemodel
import (
"fmt"
"os"
+ "sort"
"strings"
"testing"
@@ -310,3 +311,29 @@ func TestAllSettingsBySource(t *testing.T) {
}
assert.Equal(t, expected, cfg.AllSettingsBySource())
}
+
+func TestIsSet(t *testing.T) {
+ cfg := NewConfig("test", "TEST", nil)
+ cfg.SetDefault("a", 0)
+ cfg.SetDefault("b", 0)
+ cfg.BuildSchema()
+
+ cfg.Set("b", 123, model.SourceAgentRuntime)
+
+ assert.True(t, cfg.IsSet("b"))
+ assert.True(t, cfg.IsSet("a"))
+ assert.False(t, cfg.IsSet("unknown"))
+}
+
+func TestAllKeysLowercased(t *testing.T) {
+ cfg := NewConfig("test", "TEST", nil)
+ cfg.SetDefault("a", 0)
+ cfg.SetDefault("b", 0)
+ cfg.BuildSchema()
+
+ cfg.Set("b", 123, model.SourceAgentRuntime)
+
+ keys := cfg.AllKeysLowercased()
+ sort.Strings(keys)
+ assert.Equal(t, []string{"a", "b"}, keys)
+}
diff --git a/pkg/config/nodetreemodel/go.mod b/pkg/config/nodetreemodel/go.mod
index 0d13296f2f8c5..db2d57f651748 100644
--- a/pkg/config/nodetreemodel/go.mod
+++ b/pkg/config/nodetreemodel/go.mod
@@ -13,7 +13,6 @@ require (
github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3
github.com/DataDog/viper v1.13.5
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
- github.com/spf13/afero v1.11.0
github.com/stretchr/testify v1.9.0
go.uber.org/atomic v1.11.0
golang.org/x/exp v0.0.0-20241004190924-225e2abe05e6
@@ -30,6 +29,7 @@ require (
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/pelletier/go-toml v1.2.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.3.0 // indirect
github.com/spf13/jwalterweatherman v1.0.0 // indirect
github.com/spf13/pflag v1.0.3 // indirect
diff --git a/pkg/config/nodetreemodel/noimpl_methods.go b/pkg/config/nodetreemodel/noimpl_methods.go
index b55956a561ac9..1f3154acda082 100644
--- a/pkg/config/nodetreemodel/noimpl_methods.go
+++ b/pkg/config/nodetreemodel/noimpl_methods.go
@@ -9,13 +9,9 @@ import (
"fmt"
"github.com/DataDog/datadog-agent/pkg/util/log"
- "github.com/spf13/afero"
)
type notImplementedMethods interface {
- SetFs(afero.Fs)
- IsSet(string) bool
- AllKeys() []string
GetStringSliceE(string) ([]string, error)
GetStringMapE(string) (map[string]interface{}, error)
GetStringMapStringE(string) (map[string]string, error)
@@ -25,20 +21,6 @@ type notImplementedMethods interface {
type notImplMethodsImpl struct{}
-func (n *notImplMethodsImpl) SetFs(afero.Fs) {
- n.logErrorNotImplemented("SetFs")
-}
-
-func (n *notImplMethodsImpl) IsSet(string) bool {
- n.logErrorNotImplemented("IsSet")
- return false
-}
-
-func (n *notImplMethodsImpl) AllKeys() []string {
- n.logErrorNotImplemented("AllKeys")
- return nil
-}
-
func (n *notImplMethodsImpl) GetStringSliceE(string) ([]string, error) {
return nil, n.logErrorNotImplemented("GetStringSliceE")
}
diff --git a/pkg/config/setup/apm.go b/pkg/config/setup/apm.go
index 225aa0607b7a7..9bc6800ca14a2 100644
--- a/pkg/config/setup/apm.go
+++ b/pkg/config/setup/apm.go
@@ -76,7 +76,6 @@ func setupAPM(config pkgconfigmodel.Setup) {
config.BindEnvAndSetDefault("apm_config.receiver_port", 8126, "DD_APM_RECEIVER_PORT", "DD_RECEIVER_PORT")
config.BindEnvAndSetDefault("apm_config.windows_pipe_buffer_size", 1_000_000, "DD_APM_WINDOWS_PIPE_BUFFER_SIZE") //nolint:errcheck
config.BindEnvAndSetDefault("apm_config.windows_pipe_security_descriptor", "D:AI(A;;GA;;;WD)", "DD_APM_WINDOWS_PIPE_SECURITY_DESCRIPTOR") //nolint:errcheck
- config.BindEnvAndSetDefault("apm_config.remote_tagger", true, "DD_APM_REMOTE_TAGGER") //nolint:errcheck
config.BindEnvAndSetDefault("apm_config.peer_service_aggregation", true, "DD_APM_PEER_SERVICE_AGGREGATION") //nolint:errcheck
config.BindEnvAndSetDefault("apm_config.peer_tags_aggregation", true, "DD_APM_PEER_TAGS_AGGREGATION") //nolint:errcheck
config.BindEnvAndSetDefault("apm_config.compute_stats_by_span_kind", true, "DD_APM_COMPUTE_STATS_BY_SPAN_KIND") //nolint:errcheck
diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go
index af4d9a95ef993..27215e0975243 100644
--- a/pkg/config/setup/config.go
+++ b/pkg/config/setup/config.go
@@ -238,6 +238,8 @@ var serverlessConfigComponents = []func(pkgconfigmodel.Setup){
debugging,
vector,
podman,
+ fleet,
+ autoscaling,
}
func init() {
@@ -352,10 +354,6 @@ func InitConfig(config pkgconfigmodel.Setup) {
// Defaults to safe YAML methods in base and custom checks.
config.BindEnvAndSetDefault("disable_unsafe_yaml", true)
- // Yaml keys which values are stripped from flare
- config.BindEnvAndSetDefault("flare_stripped_keys", []string{})
- config.BindEnvAndSetDefault("scrubber.additional_keys", []string{})
-
// flare configs
config.BindEnvAndSetDefault("flare_provider_timeout", 10*time.Second)
@@ -552,9 +550,6 @@ func InitConfig(config pkgconfigmodel.Setup) {
// Used internally to protect against configurations where metadata endpoints return incorrect values with 200 status codes.
config.BindEnvAndSetDefault("metadata_endpoints_max_hostname_size", 255)
- // Duration during which the host tags will be submitted with metrics.
- config.BindEnvAndSetDefault("expected_tags_duration", time.Duration(0))
-
// EC2
config.BindEnvAndSetDefault("ec2_use_windows_prefix_detection", false)
config.BindEnvAndSetDefault("ec2_metadata_timeout", 300) // value in milliseconds
@@ -676,11 +671,6 @@ func InitConfig(config pkgconfigmodel.Setup) {
config.BindEnvAndSetDefault("checks_tag_cardinality", "low")
config.BindEnvAndSetDefault("dogstatsd_tag_cardinality", "low")
- // Autoscaling product
- config.BindEnvAndSetDefault("autoscaling.workload.enabled", false)
- config.BindEnvAndSetDefault("autoscaling.failover.enabled", false)
- config.BindEnv("autoscaling.failover.metrics")
-
config.BindEnvAndSetDefault("hpa_watcher_polling_freq", 10)
config.BindEnvAndSetDefault("hpa_watcher_gc_period", 60*5) // 5 minutes
config.BindEnvAndSetDefault("hpa_configmap_name", "datadog-custom-metrics")
@@ -897,7 +887,6 @@ func InitConfig(config pkgconfigmodel.Setup) {
config.BindEnvAndSetDefault("security_agent.cmd_port", DefaultSecurityAgentCmdPort)
config.BindEnvAndSetDefault("security_agent.expvar_port", 5011)
config.BindEnvAndSetDefault("security_agent.log_file", DefaultSecurityAgentLogFile)
- config.BindEnvAndSetDefault("security_agent.remote_tagger", true)
config.BindEnvAndSetDefault("security_agent.remote_workloadmeta", true)
// debug config to enable a remote client to receive data from the workloadmeta agent without a timeout
@@ -1001,8 +990,6 @@ func InitConfig(config pkgconfigmodel.Setup) {
config.BindEnvAndSetDefault("installer.registry.auth", "")
config.BindEnvAndSetDefault("installer.registry.username", "")
config.BindEnvAndSetDefault("installer.registry.password", "")
- config.BindEnv("fleet_policies_dir")
- config.SetDefault("fleet_layers", []string{})
// Data Jobs Monitoring config
config.BindEnvAndSetDefault("djm_config.enabled", false)
@@ -1022,6 +1009,11 @@ func InitConfig(config pkgconfigmodel.Setup) {
config.SetKnown("reverse_dns_enrichment.rate_limiter.throttle_error_threshold")
config.SetKnown("reverse_dns_enrichment.rate_limiter.recovery_intervals")
config.BindEnvAndSetDefault("reverse_dns_enrichment.rate_limiter.recovery_interval", time.Duration(0))
+
+ // Remote agents
+ config.BindEnvAndSetDefault("remote_agent_registry.idle_timeout", time.Duration(30*time.Second))
+ config.BindEnvAndSetDefault("remote_agent_registry.query_timeout", time.Duration(3*time.Second))
+ config.BindEnvAndSetDefault("remote_agent_registry.recommended_refresh_interval", time.Duration(10*time.Second))
}
func agent(config pkgconfigmodel.Setup) {
@@ -1101,6 +1093,13 @@ func agent(config pkgconfigmodel.Setup) {
// Use to output logs in JSON format
config.BindEnvAndSetDefault("log_format_json", false)
+ // Yaml keys which values are stripped from flare
+ config.BindEnvAndSetDefault("flare_stripped_keys", []string{})
+ config.BindEnvAndSetDefault("scrubber.additional_keys", []string{})
+
+ // Duration during which the host tags will be submitted with metrics.
+ config.BindEnvAndSetDefault("expected_tags_duration", time.Duration(0))
+
// Agent GUI access host
// 'http://localhost' is preferred over 'http://127.0.0.1' due to Internet Explorer behavior.
// Internet Explorer High Security Level does not support setting cookies via HTTP Header response.
@@ -1115,6 +1114,19 @@ func agent(config pkgconfigmodel.Setup) {
config.SetKnown("proxy.no_proxy")
}
+func fleet(config pkgconfigmodel.Setup) {
+ // Directory to store fleet policies
+ config.BindEnv("fleet_policies_dir")
+ config.SetDefault("fleet_layers", []string{})
+}
+
+func autoscaling(config pkgconfigmodel.Setup) {
+ // Autoscaling product
+ config.BindEnvAndSetDefault("autoscaling.workload.enabled", false)
+ config.BindEnvAndSetDefault("autoscaling.failover.enabled", false)
+ config.BindEnv("autoscaling.failover.metrics")
+}
+
func fips(config pkgconfigmodel.Setup) {
// Fips
config.BindEnvAndSetDefault("fips.enabled", false)
@@ -1513,6 +1525,9 @@ func logsagent(config pkgconfigmodel.Setup) {
config.BindEnvAndSetDefault("logs_config.dev_mode_use_proto", true)
config.BindEnvAndSetDefault("logs_config.dd_url_443", "agent-443-intake.logs.datadoghq.com")
config.BindEnvAndSetDefault("logs_config.stop_grace_period", 30)
+ config.BindEnvAndSetDefault("logs_config.message_channel_size", 100)
+ config.BindEnvAndSetDefault("logs_config.payload_channel_size", 10)
+
// maximum time that the unix tailer will hold a log file open after it has been rotated
config.BindEnvAndSetDefault("logs_config.close_timeout", 60)
// maximum time that the windows tailer will hold a log file open, while waiting for
diff --git a/pkg/config/setup/config_test.go b/pkg/config/setup/config_test.go
index 058eb7d7b107a..a6250a7156220 100644
--- a/pkg/config/setup/config_test.go
+++ b/pkg/config/setup/config_test.go
@@ -1405,7 +1405,7 @@ use_proxy_for_cloud_metadata: true
func TestServerlessConfigNumComponents(t *testing.T) {
// Enforce the number of config "components" reachable by the serverless agent
// to avoid accidentally adding entire components if it's not needed
- require.Len(t, serverlessConfigComponents, 22)
+ require.Len(t, serverlessConfigComponents, 24)
}
func TestServerlessConfigInit(t *testing.T) {
diff --git a/pkg/config/setup/system_probe.go b/pkg/config/setup/system_probe.go
index edeee3ade54a3..3308d3a07a39c 100644
--- a/pkg/config/setup/system_probe.go
+++ b/pkg/config/setup/system_probe.go
@@ -362,7 +362,6 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) {
eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "erpc_dentry_resolution_enabled"), true)
eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "map_dentry_resolution_enabled"), true)
eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "dentry_cache_size"), 1024)
- eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "remote_tagger"), true)
eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "runtime_monitor.enabled"), false)
eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.lazy_interface_prefixes"), []string{})
eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.classifier_priority"), 10)
diff --git a/pkg/config/setup/system_probe_cws.go b/pkg/config/setup/system_probe_cws.go
index 8ae5f0a9734a9..8c63e91129e46 100644
--- a/pkg/config/setup/system_probe_cws.go
+++ b/pkg/config/setup/system_probe_cws.go
@@ -45,9 +45,10 @@ func initCWSSystemProbeConfig(cfg pkgconfigmodel.Config) {
cfg.SetDefault("runtime_security_config.windows_filename_cache_max", 16384)
cfg.SetDefault("runtime_security_config.windows_registry_cache_max", 4096)
// windows specific channel size for etw events
- cfg.SetDefault("runtime_security_config.etw_events_channel_size", 128)
- cfg.SetDefault("runtime_security_config.etw_events_max_buffers", 0)
+ cfg.SetDefault("runtime_security_config.etw_events_channel_size", 16384)
cfg.SetDefault("runtime_security_config.windows_probe_block_on_channel_send", false)
+ cfg.SetDefault("runtime_security_config.windows_write_event_rate_limiter_max_allowed", 4096)
+ cfg.SetDefault("runtime_security_config.windows_write_event_rate_limiter_period", "1s")
// CWS - activity dump
cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.enabled", true)
diff --git a/pkg/databasemonitoring/aws/aurora.go b/pkg/databasemonitoring/aws/aurora.go
index 9d5270b4df916..590de926ce977 100644
--- a/pkg/databasemonitoring/aws/aurora.go
+++ b/pkg/databasemonitoring/aws/aurora.go
@@ -11,11 +11,12 @@ package aws
import (
"context"
"fmt"
+ "hash/fnv"
+ "strconv"
+
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/rds"
"github.com/aws/aws-sdk-go-v2/service/rds/types"
- "hash/fnv"
- "strconv"
"strings"
)
@@ -101,24 +102,34 @@ func (c *Client) GetAuroraClustersFromTags(ctx context.Context, tags []string) (
return nil, fmt.Errorf("at least one tag filter is required")
}
clusterIdentifiers := make([]string, 0)
- clusters, err := c.client.DescribeDBClusters(ctx, &rds.DescribeDBClustersInput{
- Filters: []types.Filter{
- {
- Name: aws.String("engine"),
- Values: []string{
- auroraMysqlEngine, auroraPostgresqlEngine,
+ var marker *string
+ var err error
+ for {
+ clusters, err := c.client.DescribeDBClusters(ctx, &rds.DescribeDBClustersInput{
+ Marker: marker,
+ Filters: []types.Filter{
+ {
+ Name: aws.String("engine"),
+ Values: []string{
+ auroraMysqlEngine, auroraPostgresqlEngine,
+ },
},
},
- },
- })
- if err != nil {
- return nil, fmt.Errorf("error running GetAuroraClustersFromTags: %v", err)
- }
- for _, cluster := range clusters.DBClusters {
- if cluster.DBClusterIdentifier != nil && containsTags(cluster.TagList, tags) {
- clusterIdentifiers = append(clusterIdentifiers, *cluster.DBClusterIdentifier)
+ })
+ if err != nil {
+ return nil, fmt.Errorf("error running GetAuroraClustersFromTags: %v", err)
+ }
+ for _, cluster := range clusters.DBClusters {
+ if cluster.DBClusterIdentifier != nil && containsTags(cluster.TagList, tags) {
+ clusterIdentifiers = append(clusterIdentifiers, *cluster.DBClusterIdentifier)
+ }
+ }
+ marker = clusters.Marker
+ if marker == nil {
+ break
}
}
+
return clusterIdentifiers, err
}
diff --git a/pkg/databasemonitoring/aws/aurora_test.go b/pkg/databasemonitoring/aws/aurora_test.go
index 4cb7fed6c50fc..64869b7a1d4f6 100644
--- a/pkg/databasemonitoring/aws/aurora_test.go
+++ b/pkg/databasemonitoring/aws/aurora_test.go
@@ -580,6 +580,67 @@ func TestGetAuroraClustersFromTags(t *testing.T) {
tags: []string{"test:tag", "test2:tag2"},
expectedClusterIDs: []string{"test-cluster", "test-cluster-2"},
},
+ {
+ name: "multiple pages returns ids from all pages",
+ configureClient: func(k *MockrdsService) {
+ k.EXPECT().DescribeDBClusters(gomock.Any(), &rds.DescribeDBClustersInput{
+ Filters: []types.Filter{
+ {
+ Name: aws.String("engine"),
+ Values: []string{auroraMysqlEngine, auroraPostgresqlEngine},
+ },
+ },
+ }).Return(&rds.DescribeDBClustersOutput{
+ Marker: aws.String("next"),
+ DBClusters: []types.DBCluster{
+ {
+ DBClusterIdentifier: aws.String("test-cluster"),
+ TagList: []types.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ {
+ Key: aws.String("test2"),
+ Value: aws.String("tag2"),
+ },
+ },
+ },
+ },
+ }, nil).Times(1)
+ k.EXPECT().DescribeDBClusters(gomock.Any(), &rds.DescribeDBClustersInput{
+ Marker: aws.String("next"),
+ Filters: []types.Filter{
+ {
+ Name: aws.String("engine"),
+ Values: []string{auroraMysqlEngine, auroraPostgresqlEngine},
+ },
+ },
+ }).Return(&rds.DescribeDBClustersOutput{
+ DBClusters: []types.DBCluster{
+ {
+ DBClusterIdentifier: aws.String("test-cluster-2"),
+ TagList: []types.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ {
+ Key: aws.String("test2"),
+ Value: aws.String("tag2"),
+ },
+ {
+ Key: aws.String("foo"),
+ Value: aws.String("bar"),
+ },
+ },
+ },
+ },
+ }, nil).Times(1)
+ },
+ tags: []string{"test:tag", "test2:tag2"},
+ expectedClusterIDs: []string{"test-cluster", "test-cluster-2"},
+ },
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
diff --git a/pkg/databasemonitoring/aws/client.go b/pkg/databasemonitoring/aws/client.go
index 7248c31e81d2b..f1107562107d8 100644
--- a/pkg/databasemonitoring/aws/client.go
+++ b/pkg/databasemonitoring/aws/client.go
@@ -9,10 +9,11 @@ package aws
import (
"context"
+ "time"
+
"github.com/DataDog/datadog-agent/pkg/util/ec2"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/rds"
- "time"
)
//go:generate mockgen -source=$GOFILE -package=$GOPACKAGE -destination=rdsclient_mockgen.go
diff --git a/pkg/diagnose/check.go b/pkg/diagnose/check.go
index a807e8fde6eeb..9bef56f3fa4d6 100644
--- a/pkg/diagnose/check.go
+++ b/pkg/diagnose/check.go
@@ -15,7 +15,7 @@ import (
"github.com/DataDog/datadog-agent/comp/collector/collector"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery"
"github.com/DataDog/datadog-agent/comp/core/secrets"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def"
pkgcollector "github.com/DataDog/datadog-agent/pkg/collector"
diff --git a/pkg/diagnose/runner.go b/pkg/diagnose/runner.go
index cf19e468ebd27..5d7f2ee88d1db 100644
--- a/pkg/diagnose/runner.go
+++ b/pkg/diagnose/runner.go
@@ -18,7 +18,7 @@ import (
"github.com/DataDog/datadog-agent/comp/collector/collector"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery"
"github.com/DataDog/datadog-agent/comp/core/secrets"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl"
integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def"
diff --git a/pkg/dynamicinstrumentation/diconfig/binary_inspection.go b/pkg/dynamicinstrumentation/diconfig/binary_inspection.go
index c712a1e3dbe56..02885a2b6772a 100644
--- a/pkg/dynamicinstrumentation/diconfig/binary_inspection.go
+++ b/pkg/dynamicinstrumentation/diconfig/binary_inspection.go
@@ -8,14 +8,13 @@
package diconfig
import (
- "debug/elf"
"fmt"
"reflect"
- "github.com/DataDog/datadog-agent/pkg/util/log"
-
"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
"github.com/DataDog/datadog-agent/pkg/network/go/bininspect"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
// inspectGoBinaries goes through each service and populates information about the binary
@@ -54,7 +53,7 @@ func AnalyzeBinary(procInfo *ditypes.ProcessInfo) error {
procInfo.TypeMap = typeMap
- elfFile, err := elf.Open(procInfo.BinaryPath)
+ elfFile, err := safeelf.Open(procInfo.BinaryPath)
if err != nil {
return fmt.Errorf("could not open elf file %w", err)
}
diff --git a/pkg/dynamicinstrumentation/diconfig/binary_inspection_test.go b/pkg/dynamicinstrumentation/diconfig/binary_inspection_test.go
index 3de712a7a515f..32d31e49c2188 100644
--- a/pkg/dynamicinstrumentation/diconfig/binary_inspection_test.go
+++ b/pkg/dynamicinstrumentation/diconfig/binary_inspection_test.go
@@ -8,7 +8,6 @@
package diconfig
import (
- "debug/elf"
"fmt"
"os"
"path/filepath"
@@ -18,6 +17,8 @@ import (
"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil"
"github.com/DataDog/datadog-agent/pkg/network/go/bininspect"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
+
"github.com/kr/pretty"
)
@@ -40,7 +41,7 @@ func TestBinaryInspection(t *testing.T) {
t.Error(err)
}
- f, err := elf.Open(binPath)
+ f, err := safeelf.Open(binPath)
if err != nil {
t.Error(err)
}
diff --git a/pkg/dynamicinstrumentation/diconfig/config_manager.go b/pkg/dynamicinstrumentation/diconfig/config_manager.go
index e4ba457d8b992..b939b8827d6cb 100644
--- a/pkg/dynamicinstrumentation/diconfig/config_manager.go
+++ b/pkg/dynamicinstrumentation/diconfig/config_manager.go
@@ -13,7 +13,8 @@ import (
"encoding/json"
"fmt"
- "github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/cilium/ebpf/ringbuf"
+ "github.com/google/uuid"
"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/codegen"
"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics"
@@ -22,8 +23,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/eventparser"
"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/proctracker"
"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter"
- "github.com/cilium/ebpf/ringbuf"
- "github.com/google/uuid"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
)
type rcConfig struct {
diff --git a/pkg/dynamicinstrumentation/diconfig/dwarf.go b/pkg/dynamicinstrumentation/diconfig/dwarf.go
index fbf403cf17526..85b9ba2ba4176 100644
--- a/pkg/dynamicinstrumentation/diconfig/dwarf.go
+++ b/pkg/dynamicinstrumentation/diconfig/dwarf.go
@@ -10,16 +10,16 @@ package diconfig
import (
"cmp"
"debug/dwarf"
- "debug/elf"
"fmt"
"io"
"reflect"
"slices"
- "github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/go-delve/delve/pkg/dwarf/godwarf"
"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
- "github.com/go-delve/delve/pkg/dwarf/godwarf"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
func getTypeMap(dwarfData *dwarf.Data, targetFunctions map[string]bool) (*ditypes.TypeMap, error) {
@@ -184,7 +184,7 @@ func loadDWARF(binaryPath string) (*dwarf.Data, error) {
if dwarfData, ok := dwarfMap[binaryPath]; ok {
return dwarfData, nil
}
- elfFile, err := elf.Open(binaryPath)
+ elfFile, err := safeelf.Open(binaryPath)
if err != nil {
return nil, fmt.Errorf("couldn't open elf binary: %w", err)
}
diff --git a/pkg/dynamicinstrumentation/proctracker/proctracker.go b/pkg/dynamicinstrumentation/proctracker/proctracker.go
index f03d86c17efaa..fd5c6f750488c 100644
--- a/pkg/dynamicinstrumentation/proctracker/proctracker.go
+++ b/pkg/dynamicinstrumentation/proctracker/proctracker.go
@@ -10,7 +10,6 @@
package proctracker
import (
- "debug/elf"
"errors"
"os"
"path/filepath"
@@ -19,19 +18,19 @@ import (
"sync"
"syscall"
- "github.com/DataDog/datadog-agent/pkg/util/log"
-
- "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/link"
+ "golang.org/x/sys/unix"
+ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
"github.com/DataDog/datadog-agent/pkg/network/go/bininspect"
"github.com/DataDog/datadog-agent/pkg/network/go/binversion"
"github.com/DataDog/datadog-agent/pkg/process/monitor"
"github.com/DataDog/datadog-agent/pkg/security/secl/model"
"github.com/DataDog/datadog-agent/pkg/security/utils"
"github.com/DataDog/datadog-agent/pkg/util/kernel"
- "golang.org/x/sys/unix"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
type processTrackerCallback func(ditypes.DIProcs)
@@ -134,7 +133,7 @@ func (pt *ProcessTracker) inspectBinary(exePath string, pid uint32) {
}
defer f.Close()
- elfFile, err := elf.NewFile(f)
+ elfFile, err := safeelf.NewFile(f)
if err != nil {
log.Infof("file %s could not be parsed as an ELF file: %s", binPath, err)
return
diff --git a/pkg/ebpf/map_cleaner.go b/pkg/ebpf/map_cleaner.go
index 6c0493731b4f9..9ac674764fc3a 100644
--- a/pkg/ebpf/map_cleaner.go
+++ b/pkg/ebpf/map_cleaner.go
@@ -15,9 +15,25 @@ import (
"github.com/cilium/ebpf"
"github.com/DataDog/datadog-agent/pkg/ebpf/maps"
+ "github.com/DataDog/datadog-agent/pkg/telemetry"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
+const ebpfMapsCleanerModule = "ebpf__maps__cleaner"
+
+var defaultBuckets = []float64{10, 25, 50, 75, 100, 250, 500, 1000, 10000}
+var mapCleanerTelemetry = struct {
+ examined telemetry.Counter
+ deleted telemetry.Counter
+ aborts telemetry.Counter
+ elapsed telemetry.Histogram
+}{
+ telemetry.NewCounter(ebpfMapsCleanerModule, "examined", []string{"map_name", "module", "api"}, "Counter measuring how many entries are examined"),
+ telemetry.NewCounter(ebpfMapsCleanerModule, "deleted", []string{"map_name", "module", "api"}, "Counter measuring how many entries are deleted"),
+ telemetry.NewCounter(ebpfMapsCleanerModule, "aborts", []string{"map_name", "module", "api"}, "Counter measuring how many iteration aborts occur"),
+ telemetry.NewHistogram(ebpfMapsCleanerModule, "elapsed", []string{"map_name", "module", "api"}, "Histogram of elapsed time for each Clean call", defaultBuckets),
+}
+
// MapCleaner is responsible for periodically sweeping an eBPF map
// and deleting entries that satisfy a certain predicate function supplied by the user
type MapCleaner[K any, V any] struct {
@@ -29,10 +45,16 @@ type MapCleaner[K any, V any] struct {
// termination
stopOnce sync.Once
done chan struct{}
+
+ examined telemetry.SimpleCounter
+ singleDeleted telemetry.SimpleCounter
+ batchDeleted telemetry.SimpleCounter
+ aborts telemetry.SimpleCounter
+ elapsed telemetry.SimpleHistogram
}
// NewMapCleaner instantiates a new MapCleaner
-func NewMapCleaner[K any, V any](emap *ebpf.Map, defaultBatchSize uint32) (*MapCleaner[K, V], error) {
+func NewMapCleaner[K any, V any](emap *ebpf.Map, defaultBatchSize uint32, name, module string) (*MapCleaner[K, V], error) {
batchSize := defaultBatchSize
if defaultBatchSize > emap.MaxEntries() {
batchSize = emap.MaxEntries()
@@ -46,10 +68,21 @@ func NewMapCleaner[K any, V any](emap *ebpf.Map, defaultBatchSize uint32) (*MapC
return nil, err
}
+ singleTags := map[string]string{"map_name": name, "module": module, "api": "single"}
+ batchTags := map[string]string{"map_name": name, "module": module, "api": "batch"}
+ tags := singleTags
+ if m.CanUseBatchAPI() {
+ tags = batchTags
+ }
return &MapCleaner[K, V]{
- emap: m,
- batchSize: batchSize,
- done: make(chan struct{}),
+ emap: m,
+ batchSize: batchSize,
+ done: make(chan struct{}),
+ examined: mapCleanerTelemetry.examined.WithTags(tags),
+ singleDeleted: mapCleanerTelemetry.deleted.WithTags(singleTags),
+ batchDeleted: mapCleanerTelemetry.deleted.WithTags(batchTags),
+ aborts: mapCleanerTelemetry.aborts.WithTags(tags),
+ elapsed: mapCleanerTelemetry.elapsed.WithTags(tags),
}, nil
}
@@ -66,16 +99,15 @@ func (mc *MapCleaner[K, V]) Clean(interval time.Duration, preClean func() bool,
return
}
- // Since kernel 5.6, the eBPF library supports batch operations on maps, which reduces the number of syscalls
- // required to clean the map. We use the new batch operations if they are supported (we check with a feature test instead
- // of a version comparison because some distros have backported this API), and fallback to
- // the old method otherwise. The new API is also more efficient because it minimizes the number of allocations.
- cleaner := mc.cleanWithoutBatches
- if mc.emap.CanUseBatchAPI() {
- cleaner = mc.cleanWithBatches
- }
-
mc.once.Do(func() {
+ // Since kernel 5.6, the eBPF library supports batch operations on maps, which reduces the number of syscalls
+ // required to clean the map. We use the new batch operations if they are supported (we check with a feature test instead
+ // of a version comparison because some distros have backported this API), and fall back to
+ // the old method otherwise. The new API is also more efficient because it minimizes the number of allocations.
+ cleaner := mc.cleanWithoutBatches
+ if mc.emap.CanUseBatchAPI() {
+ cleaner = mc.cleanWithBatches
+ }
ticker := time.NewTicker(interval)
go func() {
defer ticker.Stop()
@@ -118,12 +150,12 @@ func (mc *MapCleaner[K, V]) Stop() {
}
func (mc *MapCleaner[K, V]) cleanWithBatches(nowTS int64, shouldClean func(nowTS int64, k K, v V) bool) {
- now := time.Now()
+ start := time.Now()
var keysToDelete []K
var key K
var val V
- totalCount, deletedCount := 0, 0
+ totalCount, batchDeletedCount, singleDeletedCount := 0, 0, 0
it := mc.emap.IterateWithBatchSize(int(mc.batchSize))
for it.Next(&key, &val) {
@@ -136,38 +168,36 @@ func (mc *MapCleaner[K, V]) cleanWithBatches(nowTS int64, shouldClean func(nowTS
}
if err := it.Err(); err != nil {
- log.Errorf("error iterating map=%s: %s", mc.emap, err)
+ if errors.Is(err, ebpf.ErrIterationAborted) {
+ mc.aborts.Inc()
+ } else {
+ log.Errorf("error iterating map=%s: %s", mc.emap, err)
+ }
}
- var deletionError error
if len(keysToDelete) > 0 {
- deletedCount, deletionError = mc.emap.BatchDelete(keysToDelete)
+ var deletionError error
+ batchDeletedCount, deletionError = mc.emap.BatchDelete(keysToDelete)
// We might have a partial deletion (as a key might be missing due to other cleaning mechanism), so we want
// to have a best-effort method to delete all keys. We cannot know which keys were deleted, so we have to try
// and delete all of them one by one.
if errors.Is(deletionError, ebpf.ErrKeyNotExist) {
- deletionError = nil
for _, k := range keysToDelete {
if err := mc.emap.Delete(&k); err == nil {
- deletedCount++
+ singleDeletedCount++
}
}
}
}
- elapsed := time.Since(now)
- log.Debugf(
- "finished cleaning map=%s entries_checked=%d entries_deleted=%d deletion_error='%v' elapsed=%s",
- mc.emap,
- totalCount,
- deletedCount,
- deletionError,
- elapsed,
- )
+ mc.examined.Add(float64(totalCount))
+ mc.batchDeleted.Add(float64(batchDeletedCount))
+ mc.singleDeleted.Add(float64(singleDeletedCount))
+ mc.elapsed.Observe(float64(time.Since(start).Microseconds()))
}
func (mc *MapCleaner[K, V]) cleanWithoutBatches(nowTS int64, shouldClean func(nowTS int64, k K, v V) bool) {
- now := time.Now()
+ start := time.Now()
var keysToDelete []K
var key K
@@ -184,7 +214,11 @@ func (mc *MapCleaner[K, V]) cleanWithoutBatches(nowTS int64, shouldClean func(no
}
if err := entries.Err(); err != nil {
- log.Errorf("error iterating map=%s: %s", mc.emap, err)
+ if errors.Is(err, ebpf.ErrIterationAborted) {
+ mc.aborts.Inc()
+ } else {
+ log.Errorf("error iterating map=%s: %s", mc.emap, err)
+ }
}
for _, k := range keysToDelete {
@@ -194,12 +228,7 @@ func (mc *MapCleaner[K, V]) cleanWithoutBatches(nowTS int64, shouldClean func(no
}
}
- elapsed := time.Since(now)
- log.Debugf(
- "finished cleaning map=%s entries_checked=%d entries_deleted=%d elapsed=%s",
- mc.emap,
- totalCount,
- deletedCount,
- elapsed,
- )
+ mc.examined.Add(float64(totalCount))
+ mc.singleDeleted.Add(float64(deletedCount))
+ mc.elapsed.Observe(float64(time.Since(start).Microseconds()))
}
diff --git a/pkg/ebpf/map_cleaner_test.go b/pkg/ebpf/map_cleaner_test.go
index d7f5005a274db..635c882a36bed 100644
--- a/pkg/ebpf/map_cleaner_test.go
+++ b/pkg/ebpf/map_cleaner_test.go
@@ -78,7 +78,7 @@ func TestMapCleaner(t *testing.T) {
})
require.NoError(t, err)
- cleaner, err := NewMapCleaner[int64, int64](m, 10)
+ cleaner, err := NewMapCleaner[int64, int64](m, 10, "test", "")
require.NoError(t, err)
for i := 0; i < numMapEntries; i++ {
*key = int64(i)
@@ -127,7 +127,7 @@ func benchmarkBatchCleaner(b *testing.B, numMapEntries, batchSize uint32) {
})
require.NoError(b, err)
- cleaner, err := NewMapCleaner[int64, int64](m, batchSize)
+ cleaner, err := NewMapCleaner[int64, int64](m, batchSize, "test", "")
require.NoError(b, err)
b.ReportAllocs()
diff --git a/pkg/ebpf/uprobes/inspector.go b/pkg/ebpf/uprobes/inspector.go
index f26cb58f6c135..2edc9d806cdd9 100644
--- a/pkg/ebpf/uprobes/inspector.go
+++ b/pkg/ebpf/uprobes/inspector.go
@@ -8,7 +8,6 @@
package uprobes
import (
- "debug/elf"
"errors"
"fmt"
"runtime"
@@ -18,6 +17,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/network/go/bininspect"
"github.com/DataDog/datadog-agent/pkg/network/usm/utils"
"github.com/DataDog/datadog-agent/pkg/util/common"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
// BinaryInspector implementors are responsible for extracting the metadata required to attach from a binary.
@@ -54,7 +54,7 @@ var _ BinaryInspector = &NativeBinaryInspector{}
// Inspect extracts the metadata required to attach to a binary from the ELF file at the given path.
func (p *NativeBinaryInspector) Inspect(fpath utils.FilePath, requests []SymbolRequest) (map[string]bininspect.FunctionMetadata, error) {
path := fpath.HostPath
- elfFile, err := elf.Open(path)
+ elfFile, err := safeelf.Open(path)
if err != nil {
return nil, err
}
@@ -102,7 +102,7 @@ func (p *NativeBinaryInspector) Inspect(fpath utils.FilePath, requests []SymbolR
symbolMapBestEffort, _ := bininspect.GetAllSymbolsInSetByName(elfFile, bestEffortSymbols)
funcMap := make(map[string]bininspect.FunctionMetadata, len(symbolMap)+len(symbolMapBestEffort))
- for _, symMap := range []map[string]elf.Symbol{symbolMap, symbolMapBestEffort} {
+ for _, symMap := range []map[string]safeelf.Symbol{symbolMap, symbolMapBestEffort} {
for symbolName, symbol := range symMap {
m, err := p.symbolToFuncMetadata(elfFile, symbol)
if err != nil {
@@ -115,8 +115,8 @@ func (p *NativeBinaryInspector) Inspect(fpath utils.FilePath, requests []SymbolR
return funcMap, nil
}
-func (*NativeBinaryInspector) symbolToFuncMetadata(elfFile *elf.File, sym elf.Symbol) (*bininspect.FunctionMetadata, error) {
- manager.SanitizeUprobeAddresses(elfFile, []elf.Symbol{sym})
+func (*NativeBinaryInspector) symbolToFuncMetadata(elfFile *safeelf.File, sym safeelf.Symbol) (*bininspect.FunctionMetadata, error) {
+ manager.SanitizeUprobeAddresses(elfFile.File, []safeelf.Symbol{sym})
offset, err := bininspect.SymbolToOffset(elfFile, sym)
if err != nil {
return nil, err
diff --git a/pkg/ebpf/verifier/elf.go b/pkg/ebpf/verifier/elf.go
index 3f1210852fd05..95291d348a7d7 100644
--- a/pkg/ebpf/verifier/elf.go
+++ b/pkg/ebpf/verifier/elf.go
@@ -40,7 +40,6 @@ package verifier
import (
"debug/dwarf"
- "debug/elf"
"errors"
"fmt"
"io"
@@ -48,6 +47,8 @@ import (
"runtime"
"github.com/cilium/ebpf"
+
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
// getLineReader gets the line reader for a DWARF data object, searching in the compilation unit entry
@@ -133,7 +134,7 @@ func buildProgStartMap(dwarfData *dwarf.Data, symToSeq map[string]int) (map[prog
// buildSymbolToSequenceMap builds a map that links each symbol to the sequence index it belongs to.
// The address in the DWARF debug_line section is relative to the start of each sequence, but the symbol information
// doesn't explicitly say which sequence it belongs to. This function builds that map.
-func buildSymbolToSequenceMap(elfFile *elf.File) (map[string]int, error) {
+func buildSymbolToSequenceMap(elfFile *safeelf.File) (map[string]int, error) {
symbols, err := elfFile.Symbols()
if err != nil {
return nil, fmt.Errorf("failed to read symbols from ELF file: %w", err)
@@ -143,7 +144,7 @@ func buildSymbolToSequenceMap(elfFile *elf.File) (map[string]int, error) {
sectIndexToSeqIndex := make(map[int]int)
idx := 0
for i, sect := range elfFile.Sections {
- if sect.Flags&elf.SHF_EXECINSTR != 0 && sect.Size > 0 {
+ if sect.Flags&safeelf.SHF_EXECINSTR != 0 && sect.Size > 0 {
sectIndexToSeqIndex[i] = idx
idx++
}
@@ -160,32 +161,12 @@ func buildSymbolToSequenceMap(elfFile *elf.File) (map[string]int, error) {
return symToSeq, nil
}
-// openSafeELFFile opens an ELF file and recovers from panics that might happen when reading it.
-func openSafeELFFile(path string) (safe *elf.File, err error) {
- defer func() {
- r := recover()
- if r == nil {
- return
- }
-
- safe = nil
- err = fmt.Errorf("reading ELF file panicked: %s", r)
- }()
-
- file, err := elf.Open(path)
- if err != nil {
- return nil, err
- }
-
- return file, nil
-}
-
// getSourceMap builds the source map for an eBPF program. It returns two maps, one that
// for each program function maps the instruction offset to the source line information, and
// another that for each section maps the functions that belong to it.
func getSourceMap(file string, spec *ebpf.CollectionSpec) (map[string]map[int]*SourceLine, map[string][]string, error) {
// Open the ELF file
- elfFile, err := openSafeELFFile(file)
+ elfFile, err := safeelf.Open(file)
if err != nil {
return nil, nil, fmt.Errorf("cannot open ELF file %s: %w", file, err)
}
@@ -195,7 +176,7 @@ func getSourceMap(file string, spec *ebpf.CollectionSpec) (map[string]map[int]*S
// files because of missing support for relocations. However, we don't need them here as we're
// not necessary for line info, so we can skip them. The DWARF library will skip that processing
// if we set manually the type of the file to ET_EXEC.
- elfFile.Type = elf.ET_EXEC
+ elfFile.Type = safeelf.ET_EXEC
dwarfData, err := elfFile.DWARF()
if err != nil {
return nil, nil, fmt.Errorf("cannot read DWARF data for %s: %w", file, err)
diff --git a/pkg/flare/envvars.go b/pkg/flare/envvars.go
index 2d232cdbe5728..f1889de288043 100644
--- a/pkg/flare/envvars.go
+++ b/pkg/flare/envvars.go
@@ -69,7 +69,6 @@ var allowedEnvvarNames = []string{
"DD_APM_REPLACE_TAGS",
"DD_APM_PROFILING_DD_URL",
"DD_APM_WINDOWS_PIPE_BUFFER_SIZE",
- "DD_APM_REMOTE_TAGGER",
"DD_APM_PEER_SERVICE_AGGREGATION",
"DD_APM_COMPUTE_STATS_BY_SPAN_KIND",
"DD_APM_PEER_TAGS_AGGREGATION",
diff --git a/pkg/fleet/env/env.go b/pkg/fleet/env/env.go
index a9460800cf5a5..8bdd849d8f06e 100644
--- a/pkg/fleet/env/env.go
+++ b/pkg/fleet/env/env.go
@@ -36,6 +36,8 @@ const (
envAgentUserName = "DD_AGENT_USER_NAME"
// envAgentUserNameCompat provides compatibility with the original MSI parameter name
envAgentUserNameCompat = "DDAGENTUSER_NAME"
+ envTags = "DD_TAGS"
+ envExtraTags = "DD_EXTRA_TAGS"
)
var defaultEnv = Env{
@@ -95,10 +97,15 @@ type Env struct {
CDNEnabled bool
CDNLocalDirPath string
+
+ Tags []string
}
// FromEnv returns an Env struct with values from the environment.
func FromEnv() *Env {
+ splitFunc := func(c rune) bool {
+ return c == ','
+ }
return &Env{
APIKey: getEnvOrDefault(envAPIKey, defaultEnv.APIKey),
Site: getEnvOrDefault(envSite, defaultEnv.Site),
@@ -127,6 +134,11 @@ func FromEnv() *Env {
CDNEnabled: strings.ToLower(os.Getenv(envCDNEnabled)) == "true",
CDNLocalDirPath: getEnvOrDefault(envCDNLocalDirPath, ""),
+
+ Tags: append(
+ strings.FieldsFunc(os.Getenv(envTags), splitFunc),
+ strings.FieldsFunc(os.Getenv(envExtraTags), splitFunc)...,
+ ),
}
}
@@ -141,6 +153,7 @@ func FromConfig(config model.Reader) *Env {
RegistryAuthOverride: config.GetString("installer.registry.auth"),
RegistryUsername: config.GetString("installer.registry.username"),
RegistryPassword: config.GetString("installer.registry.password"),
+ Tags: utils.GetConfiguredTags(config, false),
}
}
@@ -183,6 +196,9 @@ func (e *Env) ToEnv() []string {
slices.Sort(libraries)
env = append(env, envApmLibraries+"="+strings.Join(libraries, ","))
}
+ if len(e.Tags) > 0 {
+ env = append(env, envTags+"="+strings.Join(e.Tags, ","))
+ }
env = append(env, overridesByNameToEnv(envRegistryURL, e.RegistryOverrideByImage)...)
env = append(env, overridesByNameToEnv(envRegistryAuth, e.RegistryAuthOverrideByImage)...)
env = append(env, overridesByNameToEnv(envRegistryUsername, e.RegistryUsernameByImage)...)
diff --git a/pkg/fleet/env/env_test.go b/pkg/fleet/env/env_test.go
index 78839cb1e8d00..c1691313947ea 100644
--- a/pkg/fleet/env/env_test.go
+++ b/pkg/fleet/env/env_test.go
@@ -37,6 +37,7 @@ func TestFromEnv(t *testing.T) {
InstallScript: InstallScriptEnv{
APMInstrumentationEnabled: APMInstrumentationNotSet,
},
+ Tags: []string{},
},
},
{
@@ -65,6 +66,8 @@ func TestFromEnv(t *testing.T) {
envApmLibraries: "java,dotnet:latest,ruby:1.2",
envApmInstrumentationEnabled: "all",
envAgentUserName: "customuser",
+ envTags: "k1:v1,k2:v2",
+ envExtraTags: "k3:v3,k4:v4",
},
expected: &Env{
APIKey: "123456",
@@ -108,6 +111,7 @@ func TestFromEnv(t *testing.T) {
InstallScript: InstallScriptEnv{
APMInstrumentationEnabled: APMInstrumentationEnabledAll,
},
+ Tags: []string{"k1:v1", "k2:v2", "k3:v3", "k4:v4"},
},
},
{
@@ -135,6 +139,7 @@ func TestFromEnv(t *testing.T) {
InstallScript: InstallScriptEnv{
APMInstrumentationEnabled: APMInstrumentationNotSet,
},
+ Tags: []string{},
},
},
{
@@ -161,6 +166,7 @@ func TestFromEnv(t *testing.T) {
RegistryPasswordByImage: map[string]string{},
DefaultPackagesInstallOverride: map[string]bool{},
DefaultPackagesVersionOverride: map[string]string{},
+ Tags: []string{},
},
},
}
@@ -228,6 +234,7 @@ func TestToEnv(t *testing.T) {
"dotnet": "latest",
"ruby": "1.2",
},
+ Tags: []string{"k1:v1", "k2:v2"},
},
expected: []string{
"DD_API_KEY=123456",
@@ -251,6 +258,7 @@ func TestToEnv(t *testing.T) {
"DD_INSTALLER_DEFAULT_PKG_INSTALL_ANOTHER_PACKAGE=false",
"DD_INSTALLER_DEFAULT_PKG_VERSION_PACKAGE=1.2.3",
"DD_INSTALLER_DEFAULT_PKG_VERSION_ANOTHER_PACKAGE=4.5.6",
+ "DD_TAGS=k1:v1,k2:v2",
},
},
}
diff --git a/pkg/fleet/installer/service/apm_inject.go b/pkg/fleet/installer/service/apm_inject.go
index f1c8028c803c8..847587b7487a7 100644
--- a/pkg/fleet/installer/service/apm_inject.go
+++ b/pkg/fleet/installer/service/apm_inject.go
@@ -121,38 +121,6 @@ func (a *apmInjectorInstaller) Finish(err error) {
func (a *apmInjectorInstaller) Setup(ctx context.Context) error {
var err error
- // Set up defaults for agent sockets
- if err := a.configureSocketsEnv(ctx); err != nil {
- return err
- }
- // Symlinks for sysvinit
- if err := os.Symlink(envFilePath, "/etc/default/datadog-agent-trace"); err != nil && !os.IsExist(err) {
- return fmt.Errorf("failed to symlink %s to /etc/default/datadog-agent-trace: %w", envFilePath, err)
- }
- if err := os.Symlink(envFilePath, "/etc/default/datadog-agent"); err != nil && !os.IsExist(err) {
- return fmt.Errorf("failed to symlink %s to /etc/default/datadog-agent: %w", envFilePath, err)
- }
- systemdRunning, err := isSystemdRunning()
- if err != nil {
- return fmt.Errorf("failed to check if systemd is running: %w", err)
- }
- if systemdRunning {
- if err := addSystemDEnvOverrides(ctx, agentUnit); err != nil {
- return err
- }
- if err := addSystemDEnvOverrides(ctx, agentExp); err != nil {
- return err
- }
- if err := addSystemDEnvOverrides(ctx, traceAgentUnit); err != nil {
- return err
- }
- if err := addSystemDEnvOverrides(ctx, traceAgentExp); err != nil {
- return err
- }
- if err := systemdReload(ctx); err != nil {
- return err
- }
- }
if err := setupAppArmor(ctx); err != nil {
return err
}
@@ -213,6 +181,10 @@ func (a *apmInjectorInstaller) Instrument(ctx context.Context) (retErr error) {
return fmt.Errorf("DD_APM_INSTRUMENTATION_ENABLED is set to docker but docker is not installed")
}
if shouldInstrumentDocker(a.envs) && dockerIsInstalled {
+ // Set up defaults for agent sockets -- requires an agent restart
+ if err := a.configureSocketsEnv(ctx); err != nil {
+ return err
+ }
a.cleanups = append(a.cleanups, a.dockerConfigInstrument.cleanup)
rollbackDocker, err := a.instrumentDocker(ctx)
if err != nil {
diff --git a/pkg/fleet/installer/service/apm_sockets.go b/pkg/fleet/installer/service/apm_sockets.go
index b6d40a1010e01..a01889a47ea45 100644
--- a/pkg/fleet/installer/service/apm_sockets.go
+++ b/pkg/fleet/installer/service/apm_sockets.go
@@ -86,6 +86,36 @@ func (a *apmInjectorInstaller) configureSocketsEnv(ctx context.Context) (retErr
if err := os.Chmod(envFilePath, 0644); err != nil {
return fmt.Errorf("error changing permissions of %s: %w", envFilePath, err)
}
+
+ // Symlinks for sysvinit
+ if err := os.Symlink(envFilePath, "/etc/default/datadog-agent-trace"); err != nil && !os.IsExist(err) {
+ return fmt.Errorf("failed to symlink %s to /etc/default/datadog-agent-trace: %w", envFilePath, err)
+ }
+ if err := os.Symlink(envFilePath, "/etc/default/datadog-agent"); err != nil && !os.IsExist(err) {
+ return fmt.Errorf("failed to symlink %s to /etc/default/datadog-agent: %w", envFilePath, err)
+ }
+ systemdRunning, err := isSystemdRunning()
+ if err != nil {
+ return fmt.Errorf("failed to check if systemd is running: %w", err)
+ }
+ if systemdRunning {
+ if err := addSystemDEnvOverrides(ctx, agentUnit); err != nil {
+ return err
+ }
+ if err := addSystemDEnvOverrides(ctx, agentExp); err != nil {
+ return err
+ }
+ if err := addSystemDEnvOverrides(ctx, traceAgentUnit); err != nil {
+ return err
+ }
+ if err := addSystemDEnvOverrides(ctx, traceAgentExp); err != nil {
+ return err
+ }
+ if err := systemdReload(ctx); err != nil {
+ return err
+ }
+ }
+
return nil
}
diff --git a/pkg/fleet/internal/cdn/cdn_http.go b/pkg/fleet/internal/cdn/cdn_http.go
index accd3d5eb8665..736f6cea16068 100644
--- a/pkg/fleet/internal/cdn/cdn_http.go
+++ b/pkg/fleet/internal/cdn/cdn_http.go
@@ -39,7 +39,7 @@ func newCDNHTTP(env *env.Env, configDBPath string) (CDN, error) {
return &cdnHTTP{
client: client,
currentRootsVersion: 1,
- hostTagsGetter: newHostTagsGetter(),
+ hostTagsGetter: newHostTagsGetter(env),
}, nil
}
diff --git a/pkg/fleet/internal/cdn/cdn_rc.go b/pkg/fleet/internal/cdn/cdn_rc.go
index 21f1d65d1fb86..3329a795a2b99 100644
--- a/pkg/fleet/internal/cdn/cdn_rc.go
+++ b/pkg/fleet/internal/cdn/cdn_rc.go
@@ -41,7 +41,7 @@ func newCDNRC(env *env.Env, configDBPath string) (CDN, error) {
ctx, cc := context.WithTimeout(ctx, 10*time.Second)
defer cc()
- ht := newHostTagsGetter()
+ ht := newHostTagsGetter(env)
hostname, err := pkghostname.Get(ctx)
if err != nil {
hostname = "unknown"
diff --git a/pkg/fleet/internal/cdn/tags.go b/pkg/fleet/internal/cdn/tags.go
index 85f5eb3a931b7..53c20841143bb 100644
--- a/pkg/fleet/internal/cdn/tags.go
+++ b/pkg/fleet/internal/cdn/tags.go
@@ -7,57 +7,27 @@ package cdn
import (
"context"
- "os"
- "runtime"
"time"
"github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags"
detectenv "github.com/DataDog/datadog-agent/pkg/config/env"
"github.com/DataDog/datadog-agent/pkg/config/model"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
- "github.com/DataDog/datadog-agent/pkg/util/log"
- "gopkg.in/yaml.v2"
+ "github.com/DataDog/datadog-agent/pkg/fleet/env"
)
type hostTagsGetter struct {
- config model.Config
+ config model.Config
+ staticTags []string
}
-func newHostTagsGetter() hostTagsGetter {
+func newHostTagsGetter(env *env.Env) hostTagsGetter {
config := pkgconfigsetup.Datadog()
- detectenv.DetectFeatures(config) // For host tags to work
- err := populateTags(config)
- if err != nil {
- log.Warnf("Failed to populate tags from datadog.yaml: %v", err)
- }
+ detectenv.DetectFeatures(config)
return hostTagsGetter{
- config: config,
- }
-}
-
-type tagsConfigFields struct {
- Tags []string `yaml:"tags"`
- ExtraTags []string `yaml:"extra_tags"`
-}
-
-// populateTags is a best effort to get the tags from `datadog.yaml`.
-func populateTags(config model.Config) error {
- configPath := "/etc/datadog-agent/datadog.yaml"
- if runtime.GOOS == "windows" {
- configPath = "C:\\ProgramData\\Datadog\\datadog.yaml"
+ config: config,
+ staticTags: env.Tags,
}
- rawConfig, err := os.ReadFile(configPath)
- if err != nil {
- return err
- }
- var cfg tagsConfigFields
- err = yaml.Unmarshal(rawConfig, &cfg)
- if err != nil {
- return err
- }
- config.Set("tags", cfg.Tags, model.SourceFile)
- config.Set("extra_tags", cfg.ExtraTags, model.SourceFile)
- return nil
}
func (h *hostTagsGetter) get() []string {
@@ -66,6 +36,20 @@ func (h *hostTagsGetter) get() []string {
ctx, cc := context.WithTimeout(context.Background(), time.Second)
defer cc()
hostTags := hosttags.Get(ctx, true, h.config)
- tags := append(hostTags.System, hostTags.GoogleCloudPlatform...)
+
+ tags := []string{}
+ tags = append(tags, h.staticTags...)
+ tags = append(tags, hostTags.System...)
+ tags = append(tags, hostTags.GoogleCloudPlatform...)
+ tagSet := make(map[string]struct{})
+ for _, tag := range tags {
+ tagSet[tag] = struct{}{}
+ }
+ deduplicatedTags := make([]string, 0, len(tagSet))
+ for tag := range tagSet {
+ deduplicatedTags = append(deduplicatedTags, tag)
+ }
+ tags = deduplicatedTags
+
return tags
}
diff --git a/pkg/fleet/internal/oci/download.go b/pkg/fleet/internal/oci/download.go
index dbddf682494d5..dc8dbf3dc8726 100644
--- a/pkg/fleet/internal/oci/download.go
+++ b/pkg/fleet/internal/oci/download.go
@@ -10,11 +10,14 @@ import (
"context"
"errors"
"fmt"
+ "net"
"net/http"
"net/url"
+ "os"
"runtime"
"strconv"
"strings"
+ "syscall"
"time"
"github.com/awslabs/amazon-ecr-credential-helper/ecr-login"
@@ -320,10 +323,10 @@ func (d *DownloadedPackage) ExtractLayers(mediaType types.MediaType, dir string)
err = tar.Extract(uncompressedLayer, dir, layerMaxSize)
uncompressedLayer.Close()
if err != nil {
- if !isStreamResetError(err) {
+ if !isStreamResetError(err) && !isConnectionResetByPeerError(err) {
return fmt.Errorf("could not extract layer: %w", err)
}
- log.Warnf("stream error while extracting layer, retrying")
+ log.Warnf("network error while extracting layer, retrying")
// Clean up the directory before retrying to avoid partial extraction
err = tar.Clean(dir)
if err != nil {
@@ -381,6 +384,18 @@ func isStreamResetError(err error) bool {
return false
}
+// isConnectionResetByPeerError returns true if the error is a connection reset by peer error
+func isConnectionResetByPeerError(err error) bool {
+ if netErr, ok := err.(*net.OpError); ok {
+ if syscallErr, ok := netErr.Err.(*os.SyscallError); ok {
+ if errno, ok := syscallErr.Err.(syscall.Errno); ok {
+ return errno == syscall.ECONNRESET
+ }
+ }
+ }
+ return false
+}
+
type usernamePasswordKeychain struct {
username string
password string
diff --git a/pkg/gpu/aggregator.go b/pkg/gpu/aggregator.go
index 1537869b11909..6e907c24428a1 100644
--- a/pkg/gpu/aggregator.go
+++ b/pkg/gpu/aggregator.go
@@ -99,8 +99,8 @@ func (agg *aggregator) getGPUUtilization() float64 {
// account for the fact that we might have more kernels enqueued than the
// GPU can run in parallel. This factor allows distributing the utilization
// over all the streams that were active during the interval.
-func (agg *aggregator) getStats(utilizationNormFactor float64) model.ProcessStats {
- var stats model.ProcessStats
+func (agg *aggregator) getStats(utilizationNormFactor float64) model.UtilizationMetrics {
+ var stats model.UtilizationMetrics
if agg.measuredIntervalNs > 0 {
stats.UtilizationPercentage = agg.getGPUUtilization() / utilizationNormFactor
diff --git a/pkg/gpu/consumer.go b/pkg/gpu/consumer.go
index 7a629ac1ad6b1..d18e76bfb79e0 100644
--- a/pkg/gpu/consumer.go
+++ b/pkg/gpu/consumer.go
@@ -8,11 +8,13 @@
package gpu
import (
+ "fmt"
"sync"
"sync/atomic"
"time"
"unsafe"
+ "github.com/NVIDIA/go-nvml/pkg/nvml"
"golang.org/x/sys/unix"
ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf"
@@ -107,41 +109,17 @@ func (c *cudaEventConsumer) Start() {
}
header := (*gpuebpf.CudaEventHeader)(unsafe.Pointer(&batchData.Data[0]))
+ dataPtr := unsafe.Pointer(&batchData.Data[0])
- pid := uint32(header.Pid_tgid >> 32)
- key := streamKey{pid: pid, stream: header.Stream_id}
-
- if _, ok := c.streamHandlers[key]; !ok {
- cgroup := unix.ByteSliceToString(header.Cgroup[:])
- containerID, err := cgroups.ContainerFilter("", cgroup)
- if err != nil {
- log.Errorf("error getting container ID for cgroup %s: %s", cgroup, err)
- }
- c.streamHandlers[key] = newStreamHandler(key.pid, containerID, c.sysCtx)
+ var err error
+ if isStreamSpecificEvent(gpuebpf.CudaEventType(header.Type)) {
+ err = c.handleStreamEvent(header, dataPtr, dataLen)
+ } else {
+ err = c.handleGlobalEvent(header, dataPtr, dataLen)
}
- switch header.Type {
- case gpuebpf.CudaEventTypeKernelLaunch:
- if dataLen != gpuebpf.SizeofCudaKernelLaunch {
- log.Errorf("Not enough data to parse kernel launch event, data size=%d, expecting %d", dataLen, gpuebpf.SizeofCudaKernelLaunch)
- continue
- }
- ckl := (*gpuebpf.CudaKernelLaunch)(unsafe.Pointer(&batchData.Data[0]))
- c.streamHandlers[key].handleKernelLaunch(ckl)
- case gpuebpf.CudaEventTypeMemory:
- if dataLen != gpuebpf.SizeofCudaMemEvent {
- log.Errorf("Not enough data to parse memory event, data size=%d, expecting %d", dataLen, gpuebpf.SizeofCudaMemEvent)
- continue
- }
- cme := (*gpuebpf.CudaMemEvent)(unsafe.Pointer(&batchData.Data[0]))
- c.streamHandlers[key].handleMemEvent(cme)
- case gpuebpf.CudaEventTypeSync:
- if dataLen != gpuebpf.SizeofCudaSync {
- log.Errorf("Not enough data to parse sync event, data size=%d, expecting %d", dataLen, gpuebpf.SizeofCudaSync)
- continue
- }
- cs := (*gpuebpf.CudaSync)(unsafe.Pointer(&batchData.Data[0]))
- c.streamHandlers[key].handleSync(cs)
+ if err != nil {
+ log.Errorf("Error processing CUDA event: %v", err)
}
batchData.Done()
@@ -156,6 +134,63 @@ func (c *cudaEventConsumer) Start() {
log.Trace("CUDA event consumer started")
}
+func isStreamSpecificEvent(eventType gpuebpf.CudaEventType) bool {
+ return eventType != gpuebpf.CudaEventTypeSetDevice
+}
+
+func (c *cudaEventConsumer) handleStreamEvent(header *gpuebpf.CudaEventHeader, data unsafe.Pointer, dataLen int) error {
+ streamHandler := c.getStreamHandler(header)
+
+ switch header.Type {
+ case gpuebpf.CudaEventTypeKernelLaunch:
+ if dataLen != gpuebpf.SizeofCudaKernelLaunch {
+ return fmt.Errorf("Not enough data to parse kernel launch event, data size=%d, expecting %d", dataLen, gpuebpf.SizeofCudaKernelLaunch)
+
+ }
+ streamHandler.handleKernelLaunch((*gpuebpf.CudaKernelLaunch)(data))
+ case gpuebpf.CudaEventTypeMemory:
+ if dataLen != gpuebpf.SizeofCudaMemEvent {
+ return fmt.Errorf("Not enough data to parse memory event, data size=%d, expecting %d", dataLen, gpuebpf.SizeofCudaMemEvent)
+
+ }
+ streamHandler.handleMemEvent((*gpuebpf.CudaMemEvent)(data))
+ case gpuebpf.CudaEventTypeSync:
+ if dataLen != gpuebpf.SizeofCudaSync {
+ return fmt.Errorf("Not enough data to parse sync event, data size=%d, expecting %d", dataLen, gpuebpf.SizeofCudaSync)
+
+ }
+ streamHandler.handleSync((*gpuebpf.CudaSync)(data))
+ default:
+ return fmt.Errorf("Unknown event type: %d", header.Type)
+ }
+
+ return nil
+}
+
+func getPidTidFromHeader(header *gpuebpf.CudaEventHeader) (uint32, uint32) {
+ tid := uint32(header.Pid_tgid & 0xFFFFFFFF)
+ pid := uint32(header.Pid_tgid >> 32)
+ return pid, tid
+}
+
+func (c *cudaEventConsumer) handleGlobalEvent(header *gpuebpf.CudaEventHeader, data unsafe.Pointer, dataLen int) error {
+ switch header.Type {
+ case gpuebpf.CudaEventTypeSetDevice:
+ if dataLen != gpuebpf.SizeofCudaSetDeviceEvent {
+ return fmt.Errorf("Not enough data to parse set device event, data size=%d, expecting %d", dataLen, gpuebpf.SizeofCudaSetDeviceEvent)
+
+ }
+ csde := (*gpuebpf.CudaSetDeviceEvent)(data)
+
+ pid, tid := getPidTidFromHeader(header)
+ c.sysCtx.setDeviceSelection(int(pid), int(tid), csde.Device)
+ default:
+ return fmt.Errorf("Unknown event type: %d", header.Type)
+ }
+
+ return nil
+}
+
func (c *cudaEventConsumer) handleProcessExit(pid uint32) {
for key, handler := range c.streamHandlers {
if key.pid == pid {
@@ -166,6 +201,46 @@ func (c *cudaEventConsumer) handleProcessExit(pid uint32) {
}
}
+func (c *cudaEventConsumer) getStreamKey(header *gpuebpf.CudaEventHeader) streamKey {
+ pid, tid := getPidTidFromHeader(header)
+
+ key := streamKey{
+ pid: pid,
+ stream: header.Stream_id,
+ gpuUUID: "",
+ }
+
+ // Try to get the GPU device if we can, but do not fail if we can't as we want to report
+ // the data even if we can't get the GPU UUID
+ gpuDevice, err := c.sysCtx.getCurrentActiveGpuDevice(int(pid), int(tid))
+ if err != nil {
+ log.Warnf("Error getting GPU device for process %d: %v", pid, err)
+ } else {
+ var ret nvml.Return
+ key.gpuUUID, ret = gpuDevice.GetUUID()
+ if ret != nvml.SUCCESS {
+ log.Warnf("Error getting GPU UUID for process %d: %v", pid, nvml.ErrorString(ret))
+ }
+ }
+
+ return key
+}
+
+func (c *cudaEventConsumer) getStreamHandler(header *gpuebpf.CudaEventHeader) *StreamHandler {
+ key := c.getStreamKey(header)
+ if _, ok := c.streamHandlers[key]; !ok {
+ cgroup := unix.ByteSliceToString(header.Cgroup[:])
+ containerID, err := cgroups.ContainerFilter("", cgroup)
+ if err != nil {
+ // We don't want to return an error here, as we can still process the event without the container ID
+ log.Errorf("error getting container ID for cgroup %s: %s", cgroup, err)
+ }
+ c.streamHandlers[key] = newStreamHandler(key.pid, containerID, c.sysCtx)
+ }
+
+ return c.streamHandlers[key]
+}
+
func (c *cudaEventConsumer) checkClosedProcesses() {
seenPIDs := make(map[uint32]struct{})
_ = kernel.WithAllProcs(c.cfg.ProcRoot, func(pid int) error {
diff --git a/pkg/gpu/context.go b/pkg/gpu/context.go
index be138e3575479..6c38ae4ad978c 100644
--- a/pkg/gpu/context.go
+++ b/pkg/gpu/context.go
@@ -14,10 +14,9 @@ import (
"github.com/prometheus/procfs"
- "github.com/DataDog/datadog-agent/pkg/gpu/cuda"
-
"github.com/NVIDIA/go-nvml/pkg/nvml"
+ "github.com/DataDog/datadog-agent/pkg/gpu/cuda"
"github.com/DataDog/datadog-agent/pkg/util/ktime"
)
@@ -46,6 +45,21 @@ type systemContext struct {
// procfsObj is the procfs filesystem object to retrieve process maps
procfsObj procfs.FS
+
+ // selectedDeviceByPIDAndTID maps each process ID to the map of thread IDs to selected device index.
+ // The reason to have a nested map is to allow easy cleanup of data when a process exits.
+ // The thread ID is important as the device selection in CUDA is per-thread.
+ // Note that this is the device index as seen by the process itself, which might
+ // be modified by the CUDA_VISIBLE_DEVICES environment variable later
+ selectedDeviceByPIDAndTID map[int]map[int]int32
+
+ // gpuDevices is the list of GPU devices on the system. Needs to be present to
+ // be able to compute the visible devices for a process
+ gpuDevices []nvml.Device
+
+ // visibleDevicesCache is a cache of visible devices for each process, to avoid
+ // looking into the environment variables every time
+ visibleDevicesCache map[int][]nvml.Device
}
// symbolsEntry embeds cuda.Symbols adding a field for keeping track of the last
@@ -61,12 +75,14 @@ func (e *symbolsEntry) updateLastUsedTime() {
func getSystemContext(nvmlLib nvml.Interface, procRoot string) (*systemContext, error) {
ctx := &systemContext{
- maxGpuThreadsPerDevice: make(map[int]int),
- deviceSmVersions: make(map[int]int),
- cudaSymbols: make(map[string]*symbolsEntry),
- pidMaps: make(map[int][]*procfs.ProcMap),
- nvmlLib: nvmlLib,
- procRoot: procRoot,
+ maxGpuThreadsPerDevice: make(map[int]int),
+ deviceSmVersions: make(map[int]int),
+ cudaSymbols: make(map[string]*symbolsEntry),
+ pidMaps: make(map[int][]*procfs.ProcMap),
+ nvmlLib: nvmlLib,
+ procRoot: procRoot,
+ selectedDeviceByPIDAndTID: make(map[int]map[int]int32),
+ visibleDevicesCache: make(map[int][]nvml.Device),
}
if err := ctx.fillDeviceInfo(); err != nil {
@@ -118,6 +134,8 @@ func (ctx *systemContext) fillDeviceInfo() error {
}
ctx.maxGpuThreadsPerDevice[i] = maxThreads
+
+ ctx.gpuDevices = append(ctx.gpuDevices, dev)
}
return nil
}
@@ -174,6 +192,8 @@ func (ctx *systemContext) getProcessMemoryMaps(pid int) ([]*procfs.ProcMap, erro
// removeProcess removes any data associated with a process from the system context.
func (ctx *systemContext) removeProcess(pid int) {
delete(ctx.pidMaps, pid)
+ delete(ctx.selectedDeviceByPIDAndTID, pid)
+ delete(ctx.visibleDevicesCache, pid)
}
// cleanupOldEntries removes any old entries that have not been accessed in a while, to avoid
@@ -188,3 +208,43 @@ func (ctx *systemContext) cleanupOldEntries() {
}
}
}
+
+// getCurrentActiveGpuDevice returns the active GPU device for a given process and thread, based on the
+// last selection (via cudaSetDevice) this thread made and the visible devices for the process.
+func (ctx *systemContext) getCurrentActiveGpuDevice(pid int, tid int) (nvml.Device, error) {
+ visibleDevices, ok := ctx.visibleDevicesCache[pid]
+ if !ok {
+ var err error
+ visibleDevices, err = cuda.GetVisibleDevicesForProcess(ctx.gpuDevices, pid, ctx.procRoot)
+ if err != nil {
+ return nil, fmt.Errorf("error getting visible devices for process %d: %w", pid, err)
+ }
+
+ ctx.visibleDevicesCache[pid] = visibleDevices
+ }
+
+ if len(visibleDevices) == 0 {
+ return nil, fmt.Errorf("no GPU devices for process %d", pid)
+ }
+
+ selectedDeviceIndex := int32(0)
+ pidMap, ok := ctx.selectedDeviceByPIDAndTID[pid]
+ if ok {
+ selectedDeviceIndex = pidMap[tid] // Defaults to 0, which is the same as CUDA
+ }
+
+ if selectedDeviceIndex < 0 || selectedDeviceIndex >= int32(len(visibleDevices)) {
+ return nil, fmt.Errorf("device index %d is out of range", selectedDeviceIndex)
+ }
+
+ return visibleDevices[selectedDeviceIndex], nil
+}
+
+// setDeviceSelection sets the selected device index for a given process and thread.
+func (ctx *systemContext) setDeviceSelection(pid int, tid int, deviceIndex int32) {
+ if _, ok := ctx.selectedDeviceByPIDAndTID[pid]; !ok {
+ ctx.selectedDeviceByPIDAndTID[pid] = make(map[int]int32)
+ }
+
+ ctx.selectedDeviceByPIDAndTID[pid][tid] = deviceIndex
+}
diff --git a/pkg/gpu/cuda/cubin.go b/pkg/gpu/cuda/cubin.go
index 3b89316324f95..bf91065f10d74 100644
--- a/pkg/gpu/cuda/cubin.go
+++ b/pkg/gpu/cuda/cubin.go
@@ -10,12 +10,13 @@ package cuda
import (
"bytes"
- "debug/elf"
"encoding/binary"
"fmt"
"io"
"regexp"
"strings"
+
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
// CubinKernelKey is the key to identify a kernel in a fatbin
@@ -126,7 +127,7 @@ type nvInfoItem struct {
Attr nvInfoAttr
}
-type sectionParserFunc func(*elf.Section, string) error
+type sectionParserFunc func(*safeelf.Section, string) error
// cubinParser is a helper struct to parse the cubin ELF sections
type cubinParser struct {
@@ -167,7 +168,7 @@ func (cp *cubinParser) parseCubinElf(data []byte) error {
}
data[elfVersionOffset] = 1
- cubinElf, err := elf.NewFile(bytes.NewReader(data))
+ cubinElf, err := safeelf.NewFile(bytes.NewReader(data))
if err != nil {
return fmt.Errorf("failed to parse cubin ELF: %w", err)
}
@@ -203,7 +204,7 @@ type nvInfoParsedItem struct {
value []byte
}
-func (cp *cubinParser) parseNvInfoSection(sect *elf.Section, kernelName string) error {
+func (cp *cubinParser) parseNvInfoSection(sect *safeelf.Section, kernelName string) error {
items := make(map[nvInfoAttr]nvInfoParsedItem)
buffer := sect.Open()
@@ -253,7 +254,7 @@ func (cp *cubinParser) parseNvInfoSection(sect *elf.Section, kernelName string)
return nil
}
-func (cp *cubinParser) parseTextSection(sect *elf.Section, kernelName string) error {
+func (cp *cubinParser) parseTextSection(sect *safeelf.Section, kernelName string) error {
if kernelName == "" {
return nil
}
@@ -265,7 +266,7 @@ func (cp *cubinParser) parseTextSection(sect *elf.Section, kernelName string) er
return nil
}
-func (cp *cubinParser) parseSharedMemSection(sect *elf.Section, kernelName string) error {
+func (cp *cubinParser) parseSharedMemSection(sect *safeelf.Section, kernelName string) error {
if kernelName == "" {
return nil
}
@@ -278,7 +279,7 @@ func (cp *cubinParser) parseSharedMemSection(sect *elf.Section, kernelName strin
var constantSectNameRegex = regexp.MustCompile(`\.nv\.constant\d\.(.*)`)
-func (cp *cubinParser) parseConstantMemSection(sect *elf.Section, _ string) error {
+func (cp *cubinParser) parseConstantMemSection(sect *safeelf.Section, _ string) error {
// Constant memory sections are named .nv.constantX.Y where X is the constant memory index and Y is the name
// so we have to do some custom parsing
match := constantSectNameRegex.FindStringSubmatch(sect.Name)
diff --git a/pkg/gpu/cuda/env.go b/pkg/gpu/cuda/env.go
index 50606bf09baec..930e047abaadf 100644
--- a/pkg/gpu/cuda/env.go
+++ b/pkg/gpu/cuda/env.go
@@ -119,7 +119,7 @@ func getDeviceWithIndex(systemDevices []nvml.Device, visibleDevice string) (nvml
}
if idx < 0 || idx >= len(systemDevices) {
- return nil, fmt.Errorf("device index %d is out of range [0, %d]", idx, len(systemDevices)-1)
+ return nil, fmt.Errorf("device index %d is out of range [0, %d)", idx, len(systemDevices))
}
return systemDevices[idx], nil
diff --git a/pkg/gpu/cuda/fatbin.go b/pkg/gpu/cuda/fatbin.go
index 09ea20e05cd27..5000defcd3934 100644
--- a/pkg/gpu/cuda/fatbin.go
+++ b/pkg/gpu/cuda/fatbin.go
@@ -17,13 +17,14 @@
package cuda
import (
- "debug/elf"
"encoding/binary"
"fmt"
"io"
"unsafe"
"github.com/pierrec/lz4/v4"
+
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
type fatbinDataKind uint16
@@ -104,7 +105,7 @@ func (fbd *fatbinData) validate() error {
// ParseFatbinFromELFFilePath opens the given path and parses the resulting ELF for CUDA kernels
func ParseFatbinFromELFFilePath(path string) (*Fatbin, error) {
- elfFile, err := elf.Open(path)
+ elfFile, err := safeelf.Open(path)
if err != nil {
return nil, fmt.Errorf("failed to open ELF file %s: %w", path, err)
}
@@ -119,7 +120,7 @@ func getBufferOffset(buf io.Seeker) int64 {
}
// ParseFatbinFromELFFile parses the fatbin sections of the given ELF file and returns the information found in it
-func ParseFatbinFromELFFile(elfFile *elf.File) (*Fatbin, error) {
+func ParseFatbinFromELFFile(elfFile *safeelf.File) (*Fatbin, error) {
fatbin := &Fatbin{
Kernels: make(map[CubinKernelKey]*CubinKernel),
}
diff --git a/pkg/gpu/cuda/symbols.go b/pkg/gpu/cuda/symbols.go
index ea10b549dc1a9..650d6ca7add1e 100644
--- a/pkg/gpu/cuda/symbols.go
+++ b/pkg/gpu/cuda/symbols.go
@@ -6,8 +6,9 @@
package cuda
import (
- "debug/elf"
"fmt"
+
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
// Symbols holds all necessary data from a CUDA executable for
@@ -21,7 +22,7 @@ type Symbols struct {
// GetSymbols reads an ELF file from the given path and return the parsed CUDA data
func GetSymbols(path string) (*Symbols, error) {
- elfFile, err := elf.Open(path)
+ elfFile, err := safeelf.Open(path)
if err != nil {
return nil, fmt.Errorf("error opening ELF file %s: %w", path, err)
}
diff --git a/pkg/gpu/ebpf/c/runtime/gpu.c b/pkg/gpu/ebpf/c/runtime/gpu.c
index 71fca9defeb46..4ef9966d86128 100644
--- a/pkg/gpu/ebpf/c/runtime/gpu.c
+++ b/pkg/gpu/ebpf/c/runtime/gpu.c
@@ -22,6 +22,7 @@
BPF_RINGBUF_MAP(cuda_events, cuda_event_header_t);
BPF_LRU_MAP(cuda_alloc_cache, __u64, cuda_alloc_request_args_t, 1024)
BPF_LRU_MAP(cuda_sync_cache, __u64, __u64, 1024)
+BPF_LRU_MAP(cuda_set_device_cache, __u64, int, 1024)
// cudaLaunchKernel receives the dim3 argument by value, which gets translated as
// a 64 bit register with the x and y values in the lower and upper 32 bits respectively,
@@ -80,7 +81,7 @@ int BPF_UPROBE(uprobe__cudaMalloc, void **devPtr, size_t size) {
cuda_alloc_request_args_t args = { .devPtr = devPtr, .size = size };
log_debug("cudaMalloc: pid=%llu, devPtr=%llx, size=%lu", pid_tgid, (__u64)devPtr, size);
- bpf_map_update_elem(&cuda_alloc_cache, &pid_tgid, &args, BPF_ANY);
+ bpf_map_update_with_telemetry(cuda_alloc_cache, &pid_tgid, &args, BPF_ANY);
return 0;
}
@@ -136,7 +137,7 @@ int BPF_UPROBE(uprobe__cudaStreamSynchronize, __u64 stream) {
__u64 pid_tgid = bpf_get_current_pid_tgid();
log_debug("cudaStreamSynchronize: pid=%llu, stream=%llu", pid_tgid, stream);
- bpf_map_update_elem(&cuda_sync_cache, &pid_tgid, &stream, BPF_ANY);
+ bpf_map_update_with_telemetry(cuda_sync_cache, &pid_tgid, &stream, BPF_ANY);
return 0;
}
@@ -165,4 +166,46 @@ int BPF_URETPROBE(uretprobe__cudaStreamSynchronize) {
return 0;
}
+SEC("uprobe/cudaSetDevice")
+int BPF_UPROBE(uprobe__cudaSetDevice, int device) {
+ __u64 pid_tgid = bpf_get_current_pid_tgid();
+
+    log_debug("cudaSetDevice: pid_tgid=%llu, device=%d", pid_tgid, device);
+ bpf_map_update_with_telemetry(cuda_set_device_cache, &pid_tgid, &device, BPF_ANY);
+
+ return 0;
+}
+
+SEC("uretprobe/cudaSetDevice")
+int BPF_URETPROBE(uretprobe__cudaSetDevice) {
+ __u64 pid_tgid = bpf_get_current_pid_tgid();
+ int *device = NULL;
+ cuda_set_device_event_t event = { 0 };
+ __u32 retval = PT_REGS_RC(ctx);
+
+    log_debug("cudaSetDevice[ret]: pid_tgid=%llu, retval=%d", pid_tgid, retval);
+
+ if (retval != 0) {
+ // Do not emit event if cudaSetDevice failed
+ goto cleanup;
+ }
+
+ device = bpf_map_lookup_elem(&cuda_set_device_cache, &pid_tgid);
+ if (!device) {
+ log_debug("cudaSetDevice[ret]: failed to find cudaSetDevice request");
+ return 0;
+ }
+
+ fill_header(&event.header, 0, cuda_set_device);
+ event.device = *device;
+
+ log_debug("cudaSetDevice: EMIT pid_tgid=%llu, device=%d", event.header.pid_tgid, *device);
+ bpf_ringbuf_output_with_telemetry(&cuda_events, &event, sizeof(event), 0);
+
+cleanup:
+    bpf_map_delete_elem(&cuda_set_device_cache, &pid_tgid);
+
+ return 0;
+}
+
char __license[] SEC("license") = "GPL";
diff --git a/pkg/gpu/ebpf/c/types.h b/pkg/gpu/ebpf/c/types.h
index a416773730465..364426cc6dabb 100644
--- a/pkg/gpu/ebpf/c/types.h
+++ b/pkg/gpu/ebpf/c/types.h
@@ -10,7 +10,8 @@ typedef struct {
typedef enum {
cuda_kernel_launch,
cuda_memory_event,
- cuda_sync
+ cuda_sync,
+ cuda_set_device,
} cuda_event_type_t;
#define MAX_CONTAINER_ID_LEN 129
@@ -52,4 +53,9 @@ typedef struct {
void **devPtr;
} cuda_alloc_request_args_t;
+typedef struct {
+ cuda_event_header_t header;
+ int device;
+} cuda_set_device_event_t;
+
#endif
diff --git a/pkg/gpu/ebpf/kprobe_types.go b/pkg/gpu/ebpf/kprobe_types.go
index 6afc73d6d33a7..dcb5f6ac450c7 100644
--- a/pkg/gpu/ebpf/kprobe_types.go
+++ b/pkg/gpu/ebpf/kprobe_types.go
@@ -23,9 +23,12 @@ type CudaSync C.cuda_sync_t
type CudaMemEvent C.cuda_memory_event_t
type CudaMemEventType C.cuda_memory_event_type_t
+type CudaSetDeviceEvent C.cuda_set_device_event_t
+
const CudaEventTypeKernelLaunch = C.cuda_kernel_launch
const CudaEventTypeMemory = C.cuda_memory_event
const CudaEventTypeSync = C.cuda_sync
+const CudaEventTypeSetDevice = C.cuda_set_device
const CudaMemAlloc = C.cudaMalloc
const CudaMemFree = C.cudaFree
@@ -34,3 +37,4 @@ const SizeofCudaKernelLaunch = C.sizeof_cuda_kernel_launch_t
const SizeofCudaMemEvent = C.sizeof_cuda_memory_event_t
const SizeofCudaEventHeader = C.sizeof_cuda_event_header_t
const SizeofCudaSync = C.sizeof_cuda_sync_t
+const SizeofCudaSetDeviceEvent = C.sizeof_cuda_set_device_event_t
diff --git a/pkg/gpu/ebpf/kprobe_types_linux.go b/pkg/gpu/ebpf/kprobe_types_linux.go
index 9f9f6d23b54bc..1a32675a49d9f 100644
--- a/pkg/gpu/ebpf/kprobe_types_linux.go
+++ b/pkg/gpu/ebpf/kprobe_types_linux.go
@@ -39,9 +39,16 @@ type CudaMemEvent struct {
}
type CudaMemEventType uint32
+type CudaSetDeviceEvent struct {
+ Header CudaEventHeader
+ Device int32
+ Pad_cgo_0 [4]byte
+}
+
const CudaEventTypeKernelLaunch = 0x0
const CudaEventTypeMemory = 0x1
const CudaEventTypeSync = 0x2
+const CudaEventTypeSetDevice = 0x3
const CudaMemAlloc = 0x0
const CudaMemFree = 0x1
@@ -50,3 +57,4 @@ const SizeofCudaKernelLaunch = 0xd0
const SizeofCudaMemEvent = 0xc0
const SizeofCudaEventHeader = 0xa8
const SizeofCudaSync = 0xa8
+const SizeofCudaSetDeviceEvent = 0xb0
diff --git a/pkg/gpu/probe.go b/pkg/gpu/probe.go
index a7b070881d6b7..d159cdc0d182a 100644
--- a/pkg/gpu/probe.go
+++ b/pkg/gpu/probe.go
@@ -50,9 +50,10 @@ var (
type bpfMapName = string
const (
- cudaEventsMap bpfMapName = "cuda_events"
- cudaAllocCacheMap bpfMapName = "cuda_alloc_cache"
- cudaSyncCacheMap bpfMapName = "cuda_sync_cache"
+ cudaEventsRingbuf bpfMapName = "cuda_events"
+ cudaAllocCacheMap bpfMapName = "cuda_alloc_cache"
+ cudaSyncCacheMap bpfMapName = "cuda_sync_cache"
+ cudaSetDeviceCacheMap bpfMapName = "cuda_set_device_cache"
)
// probeFuncName stores the ebpf hook function name
@@ -65,6 +66,8 @@ const (
cudaStreamSyncProbe probeFuncName = "uprobe__cudaStreamSynchronize"
cudaStreamSyncRetProbe probeFuncName = "uretprobe__cudaStreamSynchronize"
cudaFreeProbe probeFuncName = "uprobe__cudaFree"
+ cudaSetDeviceProbe probeFuncName = "uprobe__cudaSetDevice"
+ cudaSetDeviceRetProbe probeFuncName = "uretprobe__cudaSetDevice"
)
// ProbeDependencies holds the dependencies for the probe
@@ -234,12 +237,9 @@ func (p *Probe) setupManager(buf io.ReaderAt, opts manager.Options) error {
*/
Maps: []*manager.Map{
- {
- Name: cudaAllocCacheMap,
- },
- {
- Name: cudaSyncCacheMap,
- },
+ {Name: cudaAllocCacheMap},
+ {Name: cudaSyncCacheMap},
+ {Name: cudaSetDeviceCacheMap},
}}, "gpu", &ebpftelemetry.ErrorsTelemetryModifier{})
if opts.MapSpecEditors == nil {
@@ -260,7 +260,7 @@ func (p *Probe) setupManager(buf io.ReaderAt, opts manager.Options) error {
func (p *Probe) setupSharedBuffer(o *manager.Options) {
rbHandler := ddebpf.NewRingBufferHandler(consumerChannelSize)
rb := &manager.RingBuffer{
- Map: manager.Map{Name: cudaEventsMap},
+ Map: manager.Map{Name: cudaEventsRingbuf},
RingBufferOptions: manager.RingBufferOptions{
RecordHandler: rbHandler.RecordHandler,
RecordGetter: rbHandler.RecordGetter,
@@ -269,7 +269,7 @@ func (p *Probe) setupSharedBuffer(o *manager.Options) {
ringBufferSize := toPowerOf2(defaultRingBufferSize)
- o.MapSpecEditors[cudaEventsMap] = manager.MapSpecEditor{
+ o.MapSpecEditors[cudaEventsRingbuf] = manager.MapSpecEditor{
Type: ebpf.RingBuf,
MaxEntries: uint32(ringBufferSize),
KeySize: 0,
@@ -296,6 +296,8 @@ func getAttacherConfig(cfg *config.Config) uprobes.AttacherConfig {
&manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: cudaStreamSyncProbe}},
&manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: cudaStreamSyncRetProbe}},
&manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: cudaFreeProbe}},
+ &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: cudaSetDeviceProbe}},
+ &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: cudaSetDeviceRetProbe}},
},
},
},
diff --git a/pkg/gpu/probe_test.go b/pkg/gpu/probe_test.go
index 171a485a206a4..8e1f690dae0b9 100644
--- a/pkg/gpu/probe_test.go
+++ b/pkg/gpu/probe_test.go
@@ -11,9 +11,12 @@ import (
"testing"
"time"
+ "golang.org/x/exp/maps"
+
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
+ "github.com/DataDog/datadog-agent/pkg/collector/corechecks/gpu/model"
"github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest"
consumerstestutil "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers/testutil"
"github.com/DataDog/datadog-agent/pkg/gpu/config"
@@ -85,9 +88,15 @@ func (s *probeTestSuite) TestCanReceiveEvents() {
}
}
- return handlerStream != nil && handlerGlobal != nil
+ return handlerStream != nil && handlerGlobal != nil && len(handlerStream.kernelSpans) > 0 && len(handlerGlobal.allocations) > 0
}, 10*time.Second, 500*time.Millisecond, "stream and global handlers not found: existing is %v", probe.consumer.streamHandlers)
+ // Check device assignments
+ require.Contains(t, probe.consumer.sysCtx.selectedDeviceByPIDAndTID, cmd.Process.Pid)
+ tidMap := probe.consumer.sysCtx.selectedDeviceByPIDAndTID[cmd.Process.Pid]
+ require.Len(t, tidMap, 1)
+ require.ElementsMatch(t, []int{cmd.Process.Pid}, maps.Keys(tidMap))
+
require.Equal(t, 1, len(handlerStream.kernelSpans))
span := handlerStream.kernelSpans[0]
require.Equal(t, uint64(1), span.numKernels)
@@ -119,12 +128,48 @@ func (s *probeTestSuite) TestCanGenerateStats() {
stats, err := probe.GetAndFlush()
require.NoError(t, err)
require.NotNil(t, stats)
- require.NotEmpty(t, stats.ProcessStats)
- require.Contains(t, stats.ProcessStats, uint32(cmd.Process.Pid))
+ require.NotEmpty(t, stats.Metrics)
- pidStats := stats.ProcessStats[uint32(cmd.Process.Pid)]
- require.Greater(t, pidStats.UtilizationPercentage, 0.0) // percentage depends on the time this took to run, so it's not deterministic
- require.Equal(t, pidStats.Memory.MaxBytes, uint64(110))
+ metricKey := model.StatsKey{PID: uint32(cmd.Process.Pid), DeviceUUID: testutil.DefaultGpuUUID}
+ metrics := getMetricsEntry(metricKey, stats)
+ require.NotNil(t, metrics)
+
+ require.Greater(t, metrics.UtilizationPercentage, 0.0) // percentage depends on the time this took to run, so it's not deterministic
+ require.Equal(t, metrics.Memory.MaxBytes, uint64(110))
+}
+
+func (s *probeTestSuite) TestMultiGPUSupport() {
+ t := s.T()
+
+ probe := s.getProbe()
+
+ sampleArgs := testutil.SampleArgs{
+ StartWaitTimeSec: 6, // default wait time for WaitForProgramsToBeTraced is 5 seconds, give margin to attach manually to avoid flakes
+ EndWaitTimeSec: 1, // We need the process to stay active a bit so we can inspect its environment variables, if it ends too quickly we get no information
+ CudaVisibleDevicesEnv: "1,2",
+ SelectedDevice: 1,
+ }
+ // Visible devices 1,2 -> selects 1 in that array -> global device index = 2
+ selectedGPU := testutil.GPUUUIDs[2]
+
+ cmd := testutil.RunSampleWithArgs(t, testutil.CudaSample, sampleArgs)
+ utils.WaitForProgramsToBeTraced(t, gpuAttacherName, cmd.Process.Pid, utils.ManualTracingFallbackEnabled)
+
+ // Wait until the process finishes and we can get the stats. Run this instead of waiting for the process to finish
+ // so that we can time out correctly
+ require.Eventually(t, func() bool {
+ return !utils.IsProgramTraced(gpuAttacherName, cmd.Process.Pid)
+ }, 60*time.Second, 500*time.Millisecond, "process not stopped")
+
+ stats, err := probe.GetAndFlush()
+ require.NoError(t, err)
+ require.NotNil(t, stats)
+ metricKey := model.StatsKey{PID: uint32(cmd.Process.Pid), DeviceUUID: selectedGPU}
+ metrics := getMetricsEntry(metricKey, stats)
+ require.NotNil(t, metrics)
+
+ require.Greater(t, metrics.UtilizationPercentage, 0.0) // percentage depends on the time this took to run, so it's not deterministic
+ require.Equal(t, metrics.Memory.MaxBytes, uint64(110))
}
func (s *probeTestSuite) TestDetectsContainer() {
@@ -155,12 +200,12 @@ func (s *probeTestSuite) TestDetectsContainer() {
}
stats, err := probe.GetAndFlush()
+ key := model.StatsKey{PID: uint32(pid), DeviceUUID: testutil.DefaultGpuUUID}
require.NoError(t, err)
require.NotNil(t, stats)
- require.NotEmpty(t, stats.ProcessStats)
- require.Contains(t, stats.ProcessStats, uint32(pid))
+ pidStats := getMetricsEntry(key, stats)
+ require.NotNil(t, pidStats)
- pidStats := stats.ProcessStats[uint32(pid)]
require.Greater(t, pidStats.UtilizationPercentage, 0.0) // percentage depends on the time this took to run, so it's not deterministic
require.Equal(t, pidStats.Memory.MaxBytes, uint64(110))
}
diff --git a/pkg/gpu/stats.go b/pkg/gpu/stats.go
index 511956a45ec71..4e381fcf5f6dc 100644
--- a/pkg/gpu/stats.go
+++ b/pkg/gpu/stats.go
@@ -15,18 +15,18 @@ import (
// statsGenerator connects to the active stream handlers and generates stats for the GPU monitoring, by distributing
// the data to the aggregators which are responsible for computing the metrics.
type statsGenerator struct {
- streamHandlers map[streamKey]*StreamHandler // streamHandlers contains the map of active stream handlers.
- lastGenerationKTime int64 // lastGenerationTime is the kernel time of the last stats generation.
- currGenerationKTime int64 // currGenerationTime is the kernel time of the current stats generation.
- aggregators map[uint32]*aggregator // aggregators contains the map of aggregators
- sysCtx *systemContext // sysCtx is the system context with global GPU-system data
+ streamHandlers map[streamKey]*StreamHandler // streamHandlers contains the map of active stream handlers.
+ lastGenerationKTime int64 // lastGenerationTime is the kernel time of the last stats generation.
+ currGenerationKTime int64 // currGenerationTime is the kernel time of the current stats generation.
+ aggregators map[model.StatsKey]*aggregator // aggregators contains the map of aggregators
+ sysCtx *systemContext // sysCtx is the system context with global GPU-system data
}
func newStatsGenerator(sysCtx *systemContext, streamHandlers map[streamKey]*StreamHandler) *statsGenerator {
currKTime, _ := ddebpf.NowNanoseconds()
return &statsGenerator{
streamHandlers: streamHandlers,
- aggregators: make(map[uint32]*aggregator),
+ aggregators: make(map[model.StatsKey]*aggregator),
lastGenerationKTime: currKTime,
currGenerationKTime: currKTime,
sysCtx: sysCtx,
@@ -40,7 +40,7 @@ func (g *statsGenerator) getStats(nowKtime int64) *model.GPUStats {
g.currGenerationKTime = nowKtime
for key, handler := range g.streamHandlers {
- aggr := g.getOrCreateAggregator(key.pid)
+ aggr := g.getOrCreateAggregator(key)
currData := handler.getCurrentData(uint64(nowKtime))
pastData := handler.getPastData(true)
@@ -60,11 +60,15 @@ func (g *statsGenerator) getStats(nowKtime int64) *model.GPUStats {
normFactor := g.getNormalizationFactor()
stats := &model.GPUStats{
- ProcessStats: make(map[uint32]model.ProcessStats),
+ Metrics: make([]model.StatsTuple, 0, len(g.aggregators)),
}
- for pid, aggr := range g.aggregators {
- stats.ProcessStats[pid] = aggr.getStats(normFactor)
+ for aggKey, aggr := range g.aggregators {
+ entry := model.StatsTuple{
+ Key: aggKey,
+ UtilizationMetrics: aggr.getStats(normFactor),
+ }
+ stats.Metrics = append(stats.Metrics, entry)
}
g.lastGenerationKTime = g.currGenerationKTime
@@ -72,15 +76,20 @@ func (g *statsGenerator) getStats(nowKtime int64) *model.GPUStats {
return stats
}
-func (g *statsGenerator) getOrCreateAggregator(pid uint32) *aggregator {
- if _, ok := g.aggregators[pid]; !ok {
- g.aggregators[pid] = newAggregator(g.sysCtx)
+func (g *statsGenerator) getOrCreateAggregator(sKey streamKey) *aggregator {
+ aggKey := model.StatsKey{
+ PID: sKey.pid,
+ DeviceUUID: sKey.gpuUUID,
+ }
+
+ if _, ok := g.aggregators[aggKey]; !ok {
+ g.aggregators[aggKey] = newAggregator(g.sysCtx)
}
// Update the last check time and the measured interval, as these change between check runs
- g.aggregators[pid].lastCheckKtime = uint64(g.lastGenerationKTime)
- g.aggregators[pid].measuredIntervalNs = g.currGenerationKTime - g.lastGenerationKTime
- return g.aggregators[pid]
+ g.aggregators[aggKey].lastCheckKtime = uint64(g.lastGenerationKTime)
+ g.aggregators[aggKey].measuredIntervalNs = g.currGenerationKTime - g.lastGenerationKTime
+ return g.aggregators[aggKey]
}
// getNormalizationFactor returns the factor to use for utilization
diff --git a/pkg/gpu/stats_test.go b/pkg/gpu/stats_test.go
index 55605343ae931..4ebbec874ecdc 100644
--- a/pkg/gpu/stats_test.go
+++ b/pkg/gpu/stats_test.go
@@ -13,12 +13,23 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/DataDog/datadog-agent/pkg/collector/corechecks/gpu/model"
ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf"
gpuebpf "github.com/DataDog/datadog-agent/pkg/gpu/ebpf"
"github.com/DataDog/datadog-agent/pkg/gpu/testutil"
"github.com/DataDog/datadog-agent/pkg/util/kernel"
)
+func getMetricsEntry(key model.StatsKey, stats *model.GPUStats) *model.UtilizationMetrics {
+ for _, entry := range stats.Metrics {
+ if entry.Key == key {
+ return &entry.UtilizationMetrics
+ }
+ }
+
+ return nil
+}
+
func getStatsGeneratorForTest(t *testing.T) (*statsGenerator, map[streamKey]*StreamHandler, int64) {
sysCtx, err := getSystemContext(testutil.GetBasicNvmlMock(), kernel.ProcFSRoot())
require.NoError(t, err)
@@ -43,7 +54,7 @@ func TestGetStatsWithOnlyCurrentStreamData(t *testing.T) {
pid := uint32(1)
streamID := uint64(120)
pidTgid := uint64(pid)<<32 + uint64(pid)
- skeyKern := streamKey{pid: pid, stream: streamID}
+ skeyKern := streamKey{pid: pid, stream: streamID, gpuUUID: testutil.DefaultGpuUUID}
shmemSize := uint64(10)
streamHandlers[skeyKern] = &StreamHandler{
processEnded: false,
@@ -61,7 +72,7 @@ func TestGetStatsWithOnlyCurrentStreamData(t *testing.T) {
}
allocSize := uint64(10)
- skeyAlloc := streamKey{pid: pid, stream: 0}
+ skeyAlloc := streamKey{pid: pid, stream: 0, gpuUUID: testutil.DefaultGpuUUID}
streamHandlers[skeyAlloc] = &StreamHandler{
processEnded: false,
memAllocEvents: map[uint64]gpuebpf.CudaMemEvent{
@@ -78,15 +89,16 @@ func TestGetStatsWithOnlyCurrentStreamData(t *testing.T) {
checkKtime := ktime + int64(checkDuration)
stats := statsGen.getStats(checkKtime)
require.NotNil(t, stats)
- require.Contains(t, stats.ProcessStats, pid)
- pidStats := stats.ProcessStats[pid]
- require.Equal(t, allocSize*2, pidStats.Memory.CurrentBytes)
- require.Equal(t, allocSize*2, pidStats.Memory.MaxBytes)
+ metricsKey := model.StatsKey{PID: pid, DeviceUUID: testutil.DefaultGpuUUID}
+ metrics := getMetricsEntry(metricsKey, stats)
+ require.NotNil(t, metrics)
+ require.Equal(t, allocSize*2, metrics.Memory.CurrentBytes)
+ require.Equal(t, allocSize*2, metrics.Memory.MaxBytes)
// defined kernel is using only 1 core for 9 of the 10 seconds
expectedUtil := 1.0 / testutil.DefaultGpuCores * 0.9
- require.Equal(t, expectedUtil, pidStats.UtilizationPercentage)
+ require.Equal(t, expectedUtil, metrics.UtilizationPercentage)
}
func TestGetStatsWithOnlyPastStreamData(t *testing.T) {
@@ -97,7 +109,7 @@ func TestGetStatsWithOnlyPastStreamData(t *testing.T) {
pid := uint32(1)
streamID := uint64(120)
- skeyKern := streamKey{pid: pid, stream: streamID}
+ skeyKern := streamKey{pid: pid, stream: streamID, gpuUUID: testutil.DefaultGpuUUID}
numThreads := uint64(5)
streamHandlers[skeyKern] = &StreamHandler{
processEnded: false,
@@ -112,7 +124,7 @@ func TestGetStatsWithOnlyPastStreamData(t *testing.T) {
}
allocSize := uint64(10)
- skeyAlloc := streamKey{pid: pid, stream: 0}
+ skeyAlloc := streamKey{pid: pid, stream: 0, gpuUUID: testutil.DefaultGpuUUID}
streamHandlers[skeyAlloc] = &StreamHandler{
processEnded: false,
allocations: []*memoryAllocation{
@@ -130,17 +142,18 @@ func TestGetStatsWithOnlyPastStreamData(t *testing.T) {
checkKtime := ktime + int64(checkDuration)
stats := statsGen.getStats(checkKtime)
require.NotNil(t, stats)
- require.Contains(t, stats.ProcessStats, pid)
- pidStats := stats.ProcessStats[pid]
- require.Equal(t, uint64(0), pidStats.Memory.CurrentBytes)
- require.Equal(t, allocSize, pidStats.Memory.MaxBytes)
+ metricsKey := model.StatsKey{PID: pid, DeviceUUID: testutil.DefaultGpuUUID}
+ metrics := getMetricsEntry(metricsKey, stats)
+ require.NotNil(t, metrics)
+ require.Equal(t, uint64(0), metrics.Memory.CurrentBytes)
+ require.Equal(t, allocSize, metrics.Memory.MaxBytes)
// numThreads / DefaultGpuCores is the utilization for the
threadSecondsUsed := float64(numThreads) * float64(endKtime-startKtime) / 1e9
threadSecondsAvailable := float64(testutil.DefaultGpuCores) * checkDuration.Seconds()
expectedUtil := threadSecondsUsed / threadSecondsAvailable
- require.InDelta(t, expectedUtil, pidStats.UtilizationPercentage, 0.001)
+ require.InDelta(t, expectedUtil, metrics.UtilizationPercentage, 0.001)
}
func TestGetStatsWithPastAndCurrentData(t *testing.T) {
@@ -151,7 +164,7 @@ func TestGetStatsWithPastAndCurrentData(t *testing.T) {
pid := uint32(1)
streamID := uint64(120)
- skeyKern := streamKey{pid: pid, stream: streamID}
+ skeyKern := streamKey{pid: pid, stream: streamID, gpuUUID: testutil.DefaultGpuUUID}
pidTgid := uint64(pid)<<32 + uint64(pid)
numThreads := uint64(5)
shmemSize := uint64(10)
@@ -179,7 +192,7 @@ func TestGetStatsWithPastAndCurrentData(t *testing.T) {
}
allocSize := uint64(10)
- skeyAlloc := streamKey{pid: pid, stream: 0}
+ skeyAlloc := streamKey{pid: pid, stream: 0, gpuUUID: testutil.DefaultGpuUUID}
streamHandlers[skeyAlloc] = &StreamHandler{
processEnded: false,
allocations: []*memoryAllocation{
@@ -205,11 +218,12 @@ func TestGetStatsWithPastAndCurrentData(t *testing.T) {
checkKtime := ktime + int64(checkDuration)
stats := statsGen.getStats(checkKtime)
require.NotNil(t, stats)
- require.Contains(t, stats.ProcessStats, pid)
- pidStats := stats.ProcessStats[pid]
- require.Equal(t, allocSize+shmemSize, pidStats.Memory.CurrentBytes)
- require.Equal(t, allocSize*2+shmemSize, pidStats.Memory.MaxBytes)
+ metricsKey := model.StatsKey{PID: pid, DeviceUUID: testutil.DefaultGpuUUID}
+ metrics := getMetricsEntry(metricsKey, stats)
+ require.NotNil(t, metrics)
+ require.Equal(t, allocSize+shmemSize, metrics.Memory.CurrentBytes)
+ require.Equal(t, allocSize*2+shmemSize, metrics.Memory.MaxBytes)
// numThreads / DefaultGpuCores is the utilization for the
threadSecondsUsed := float64(numThreads) * float64(endKtime-startKtime) / 1e9
@@ -217,5 +231,5 @@ func TestGetStatsWithPastAndCurrentData(t *testing.T) {
expectedUtilKern1 := threadSecondsUsed / threadSecondsAvailable
expectedUtilKern2 := 1.0 / testutil.DefaultGpuCores * 0.9
expectedUtil := expectedUtilKern1 + expectedUtilKern2
- require.InDelta(t, expectedUtil, pidStats.UtilizationPercentage, 0.001)
+ require.InDelta(t, expectedUtil, metrics.UtilizationPercentage, 0.001)
}
diff --git a/pkg/gpu/stream.go b/pkg/gpu/stream.go
index 735d4bf2a0ea6..e9a33a33b2286 100644
--- a/pkg/gpu/stream.go
+++ b/pkg/gpu/stream.go
@@ -44,8 +44,9 @@ type enrichedKernelLaunch struct {
// streamKey is a unique identifier for a CUDA stream
type streamKey struct {
- pid uint32
- stream uint64
+ pid uint32
+ stream uint64
+ gpuUUID string
}
// streamData contains kernel spans and allocations for a stream
diff --git a/pkg/gpu/testdata/cudasample.c b/pkg/gpu/testdata/cudasample.c
index e099919977ba2..aa65df206567b 100644
--- a/pkg/gpu/testdata/cudasample.c
+++ b/pkg/gpu/testdata/cudasample.c
@@ -1,3 +1,8 @@
+// This is a dummy CUDA runtime library that can be used to test the GPU monitoring code without
+// having a real CUDA runtime library installed.
+// This binary should be run using the pkg/gpu/testutil/samplebins.go:RunSample* methods, which
+// call the binary with the correct arguments and environment variables to test the agent.
+
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
@@ -27,26 +32,30 @@ cudaError_t cudaStreamSynchronize(cudaStream_t stream) {
return 0;
}
+cudaError_t cudaSetDevice(int device) {
+ return 0;
+}
+
int main(int argc, char **argv) {
cudaStream_t stream = 30;
- if (argc < 3) {
- fprintf(stderr, "Usage: %s <wait_start> <wait_end>\n", argv[0]);
+ if (argc != 4) {
+ fprintf(stderr, "Usage: %s <wait_start> <wait_end> <device>\n", argv[0]);
return 1;
}
int waitStart = atoi(argv[1]);
int waitEnd = atoi(argv[2]);
+ int device = atoi(argv[3]);
fprintf(stderr, "Waiting for %d seconds before starting\n", waitStart);
// Give time for the eBPF program to load
sleep(waitStart);
- fprintf(stderr, "Starting calls.\n");
-
- fprintf(stderr, "Starting!\n");
+ fprintf(stderr, "Starting calls, will use device index %d\n", device);
+ cudaSetDevice(device);
cudaLaunchKernel((void *)0x1234, (dim3){ 1, 2, 3 }, (dim3){ 4, 5, 6 }, NULL, 10, stream);
void *ptr;
cudaMalloc(&ptr, 100);
diff --git a/pkg/gpu/testutil/mocks.go b/pkg/gpu/testutil/mocks.go
index 12429ea099fd2..b703a39001ef4 100644
--- a/pkg/gpu/testutil/mocks.go
+++ b/pkg/gpu/testutil/mocks.go
@@ -16,22 +16,40 @@ import (
// DefaultGpuCores is the default number of cores for a GPU device in the mock.
const DefaultGpuCores = 10
+// GPUUUIDs is a list of UUIDs for the devices returned by the mock
+var GPUUUIDs = []string{
+ "GPU-12345678-1234-1234-1234-123456789012",
+ "GPU-99999999-1234-1234-1234-123456789013",
+ "GPU-00000000-1234-1234-1234-123456789014",
+}
+
+// DefaultGpuUUID is the UUID for the default device returned by the mock
+var DefaultGpuUUID = GPUUUIDs[0]
+
+// GetDeviceMock returns a mock of the nvml.Device with the given UUID.
+func GetDeviceMock(uuid string) *nvmlmock.Device {
+ return &nvmlmock.Device{
+ GetNumGpuCoresFunc: func() (int, nvml.Return) {
+ return DefaultGpuCores, nvml.SUCCESS
+ },
+ GetCudaComputeCapabilityFunc: func() (int, int, nvml.Return) {
+ return 7, 5, nvml.SUCCESS
+ },
+ GetUUIDFunc: func() (string, nvml.Return) {
+ return uuid, nvml.SUCCESS
+ },
+ }
+}
+
// GetBasicNvmlMock returns a mock of the nvml.Interface with a single device with 10 cores,
// useful for basic tests that need only the basic interaction with NVML to be working.
func GetBasicNvmlMock() *nvmlmock.Interface {
return &nvmlmock.Interface{
DeviceGetCountFunc: func() (int, nvml.Return) {
- return 1, nvml.SUCCESS
+ return len(GPUUUIDs), nvml.SUCCESS
},
- DeviceGetHandleByIndexFunc: func(int) (nvml.Device, nvml.Return) {
- return &nvmlmock.Device{
- GetNumGpuCoresFunc: func() (int, nvml.Return) {
- return DefaultGpuCores, nvml.SUCCESS
- },
- GetCudaComputeCapabilityFunc: func() (int, int, nvml.Return) {
- return 7, 5, nvml.SUCCESS
- },
- }, nvml.SUCCESS
+ DeviceGetHandleByIndexFunc: func(index int) (nvml.Device, nvml.Return) {
+ return GetDeviceMock(GPUUUIDs[index]), nvml.SUCCESS
},
DeviceGetCudaComputeCapabilityFunc: func(nvml.Device) (int, int, nvml.Return) {
return 7, 5, nvml.SUCCESS
diff --git a/pkg/gpu/testutil/samplebins.go b/pkg/gpu/testutil/samplebins.go
index ddb8bd3992376..e918f62d76c56 100644
--- a/pkg/gpu/testutil/samplebins.go
+++ b/pkg/gpu/testutil/samplebins.go
@@ -51,12 +51,27 @@ type SampleArgs struct {
// eBPF probe has a chance to read the events and inspect the binary. To make the behavior of the sample binary
// more predictable and avoid flakiness in the tests, we introduce a delay before the binary exits.
EndWaitTimeSec int
+
+ // CudaVisibleDevicesEnv represents the value of the CUDA_VISIBLE_DEVICES environment variable
+ CudaVisibleDevicesEnv string
+
+ // SelectedDevice represents the device that the CUDA sample will select
+ SelectedDevice int
+}
+
+func (a *SampleArgs) getEnv() []string {
+ env := []string{}
+ if a.CudaVisibleDevicesEnv != "" {
+ env = append(env, fmt.Sprintf("CUDA_VISIBLE_DEVICES=%s", a.CudaVisibleDevicesEnv))
+ }
+ return env
}
func (a *SampleArgs) getCLIArgs() []string {
return []string{
strconv.Itoa(int(a.StartWaitTimeSec)),
strconv.Itoa(int(a.EndWaitTimeSec)),
+ strconv.Itoa(a.SelectedDevice),
}
}
@@ -88,8 +103,10 @@ func getBuiltSamplePath(t *testing.T, sample SampleName) string {
// GetDefaultArgs returns the default arguments for the sample binary
func GetDefaultArgs() SampleArgs {
return SampleArgs{
- StartWaitTimeSec: 5,
- EndWaitTimeSec: 0,
+ StartWaitTimeSec: 5,
+ EndWaitTimeSec: 1, // We need the process to stay active a bit so we can inspect its environment variables, if it ends too quickly we get no information
+ CudaVisibleDevicesEnv: "",
+ SelectedDevice: 0,
}
}
@@ -103,6 +120,9 @@ func runCommandAndPipeOutput(t *testing.T, command []string, args SampleArgs, lo
}
})
+ env := args.getEnv()
+ cmd.Env = append(cmd.Env, env...)
+
stdout, err := cmd.StdoutPipe()
require.NoError(t, err)
stderr, err := cmd.StderrPipe()
@@ -111,7 +131,7 @@ func runCommandAndPipeOutput(t *testing.T, command []string, args SampleArgs, lo
redirectReaderToLog(stdout, fmt.Sprintf("%s stdout", logName))
redirectReaderToLog(stderr, fmt.Sprintf("%s stderr", logName))
- log.Debugf("Running command %v", command)
+ log.Debugf("Running command %v, env=%v", command, env)
err = cmd.Start()
require.NoError(t, err)
@@ -141,7 +161,14 @@ func RunSampleInDockerWithArgs(t *testing.T, name SampleName, image DockerImage,
containerName := fmt.Sprintf("gpu-testutil-%s", utils.RandString(10))
mountArg := fmt.Sprintf("%s:%s", builtBin, builtBin)
- command := []string{"docker", "run", "--rm", "-v", mountArg, "--name", containerName, string(image), builtBin}
+ command := []string{"docker", "run", "--rm", "-v", mountArg, "--name", containerName}
+
+ // Pass environment variables to the container as docker args
+ for _, env := range args.getEnv() {
+ command = append(command, "-e", env)
+ }
+
+ command = append(command, string(image), builtBin)
_ = runCommandAndPipeOutput(t, command, args, string(name))
diff --git a/pkg/languagedetection/internal/detectors/go_detector.go b/pkg/languagedetection/internal/detectors/go_detector.go
index 4f8d7e86b73eb..ae9260bbeff8f 100644
--- a/pkg/languagedetection/internal/detectors/go_detector.go
+++ b/pkg/languagedetection/internal/detectors/go_detector.go
@@ -8,7 +8,6 @@
package detectors
import (
- "debug/elf"
"fmt"
"path"
"strconv"
@@ -17,6 +16,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels"
"github.com/DataDog/datadog-agent/pkg/network/go/binversion"
"github.com/DataDog/datadog-agent/pkg/util/kernel"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
//nolint:revive // TODO(PROC) Fix revive linter
@@ -32,12 +32,12 @@ func NewGoDetector() GoDetector {
// DetectLanguage allows for detecting if a process is a go process, and its version.
// Note that currently the goDetector only returns non-retriable errors since in all cases we will not be able to detect the language.
// Scenarios in which we can return an error:
-// - Program exits early, and we fail to call `elf.Open`. Note that in the future it may be possible to lock the directory using a system call.
+// - Program exits early, and we fail to call `safeelf.Open`. Note that in the future it may be possible to lock the directory using a system call.
// - Program is not a go binary, or has build tags stripped out. In this case we return a `dderrors.NotFound`.
func (d GoDetector) DetectLanguage(process languagemodels.Process) (languagemodels.Language, error) {
exePath := d.getHostProc(process.GetPid())
- bin, err := elf.Open(exePath)
+ bin, err := safeelf.Open(exePath)
if err != nil {
return languagemodels.Language{}, fmt.Errorf("open: %v", err)
}
diff --git a/pkg/logs/auditor/auditor.go b/pkg/logs/auditor/auditor.go
index cad651a7c7d27..05a196125cf03 100644
--- a/pkg/logs/auditor/auditor.go
+++ b/pkg/logs/auditor/auditor.go
@@ -16,7 +16,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/status/health"
"github.com/DataDog/datadog-agent/pkg/util/log"
- "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/logs/message"
)
@@ -104,7 +104,7 @@ func (a *RegistryAuditor) Stop() {
func (a *RegistryAuditor) createChannels() {
a.chansMutex.Lock()
defer a.chansMutex.Unlock()
- a.inputChan = make(chan *message.Payload, config.ChanSize)
+ a.inputChan = make(chan *message.Payload, pkgconfigsetup.Datadog().GetInt("logs_config.message_channel_size"))
a.done = make(chan struct{})
}
diff --git a/pkg/logs/auditor/go.mod b/pkg/logs/auditor/go.mod
index 350a008e3e91c..59d61c086e1ef 100644
--- a/pkg/logs/auditor/go.mod
+++ b/pkg/logs/auditor/go.mod
@@ -43,6 +43,7 @@ replace (
require (
github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0
github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3
@@ -56,7 +57,6 @@ require (
github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect
- github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect
github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect
github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect
diff --git a/pkg/logs/client/destination.go b/pkg/logs/client/destination.go
index b1bfa151bff9c..affb2bc6b7651 100644
--- a/pkg/logs/client/destination.go
+++ b/pkg/logs/client/destination.go
@@ -6,7 +6,9 @@
//nolint:revive // TODO(AML) Fix revive linter
package client
-import "github.com/DataDog/datadog-agent/pkg/logs/message"
+import (
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
// Destination sends a payload to a specific endpoint over a given network protocol.
type Destination interface {
@@ -16,6 +18,9 @@ type Destination interface {
// Destination target (e.g. https://agent-intake.logs.datadoghq.com)
Target() string
+ // Metadata returns the metadata of the destination
+ Metadata() *DestinationMetadata
+
// Start starts the destination send loop. close the intput to stop listening for payloads. stopChan is
// signaled when the destination has fully shutdown and all buffered payloads have been flushed. isRetrying is
// signaled when the retry state changes. isRetrying can be nil if you don't need to handle retries.
diff --git a/pkg/logs/client/destination_metadata.go b/pkg/logs/client/destination_metadata.go
new file mode 100644
index 0000000000000..1c4eaa429a559
--- /dev/null
+++ b/pkg/logs/client/destination_metadata.go
@@ -0,0 +1,54 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package client
+
+import (
+ "fmt"
+)
+
+// DestinationMetadata contains metadata about a destination
+type DestinationMetadata struct {
+ componentName string
+ instanceID string
+ kind string
+ endpointId string
+ ReportingEnabled bool
+}
+
+// NewDestinationMetadata returns a new DestinationMetadata
+func NewDestinationMetadata(componentName, instanceID, kind, endpointId string) *DestinationMetadata {
+ return &DestinationMetadata{
+ componentName: componentName,
+ instanceID: instanceID,
+ kind: kind,
+ endpointId: endpointId,
+ ReportingEnabled: true,
+ }
+}
+
+// NewNoopDestinationMetadata returns a new DestinationMetadata with reporting disabled
+func NewNoopDestinationMetadata() *DestinationMetadata {
+ return &DestinationMetadata{
+ ReportingEnabled: false,
+ }
+}
+
+// TelemetryName returns the telemetry name for the destination
+func (d *DestinationMetadata) TelemetryName() string {
+ if !d.ReportingEnabled {
+ return ""
+ }
+ return fmt.Sprintf("%s_%s_%s_%s", d.componentName, d.instanceID, d.kind, d.endpointId)
+}
+
+// MonitorTag returns the monitor tag for the destination
+func (d *DestinationMetadata) MonitorTag() string {
+ if !d.ReportingEnabled {
+ return ""
+ }
+ return fmt.Sprintf("destination_%s_%s", d.kind, d.endpointId)
+}
diff --git a/pkg/logs/client/go.mod b/pkg/logs/client/go.mod
index 58bfdf330a8cb..c22dea009faf2 100644
--- a/pkg/logs/client/go.mod
+++ b/pkg/logs/client/go.mod
@@ -43,6 +43,7 @@ replace (
github.com/DataDog/datadog-agent/pkg/util/system => ../../util/system
github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../util/system/socket
github.com/DataDog/datadog-agent/pkg/util/testutil => ../../util/testutil
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ../../util/utilizationtracker
github.com/DataDog/datadog-agent/pkg/util/winutil => ../../util/winutil
github.com/DataDog/datadog-agent/pkg/version => ../../version
)
@@ -87,9 +88,11 @@ require (
github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.1 // indirect
github.com/DataDog/viper v1.13.5 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
diff --git a/pkg/logs/client/go.sum b/pkg/logs/client/go.sum
index 447b5e01ec8da..c486d982207c3 100644
--- a/pkg/logs/client/go.sum
+++ b/pkg/logs/client/go.sum
@@ -12,6 +12,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
diff --git a/pkg/logs/client/http/destination.go b/pkg/logs/client/http/destination.go
index 954397f9882e7..7553b7e91d913 100644
--- a/pkg/logs/client/http/destination.go
+++ b/pkg/logs/client/http/destination.go
@@ -81,8 +81,10 @@ type Destination struct {
lastRetryError error
// Telemetry
- expVars *expvar.Map
- telemetryName string
+ expVars *expvar.Map
+ destMeta *client.DestinationMetadata
+ pipelineMonitor metrics.PipelineMonitor
+ utilization metrics.UtilizationMonitor
}
// NewDestination returns a new Destination.
@@ -94,8 +96,9 @@ func NewDestination(endpoint config.Endpoint,
destinationsContext *client.DestinationsContext,
maxConcurrentBackgroundSends int,
shouldRetry bool,
- telemetryName string,
- cfg pkgconfigmodel.Reader) *Destination {
+ destMeta *client.DestinationMetadata,
+ cfg pkgconfigmodel.Reader,
+ pipelineMonitor metrics.PipelineMonitor) *Destination {
return newDestination(endpoint,
contentType,
@@ -103,8 +106,9 @@ func NewDestination(endpoint config.Endpoint,
time.Second*10,
maxConcurrentBackgroundSends,
shouldRetry,
- telemetryName,
- cfg)
+ destMeta,
+ cfg,
+ pipelineMonitor)
}
func newDestination(endpoint config.Endpoint,
@@ -113,8 +117,9 @@ func newDestination(endpoint config.Endpoint,
timeout time.Duration,
maxConcurrentBackgroundSends int,
shouldRetry bool,
- telemetryName string,
- cfg pkgconfigmodel.Reader) *Destination {
+ destMeta *client.DestinationMetadata,
+ cfg pkgconfigmodel.Reader,
+ pipelineMonitor metrics.PipelineMonitor) *Destination {
if maxConcurrentBackgroundSends <= 0 {
maxConcurrentBackgroundSends = 1
@@ -130,8 +135,9 @@ func newDestination(endpoint config.Endpoint,
expVars := &expvar.Map{}
expVars.AddFloat(expVarIdleMsMapKey, 0)
expVars.AddFloat(expVarInUseMsMapKey, 0)
- if telemetryName != "" {
- metrics.DestinationExpVars.Set(telemetryName, expVars)
+
+ if destMeta.ReportingEnabled {
+ metrics.DestinationExpVars.Set(destMeta.TelemetryName(), expVars)
}
return &Destination{
@@ -150,8 +156,10 @@ func newDestination(endpoint config.Endpoint,
retryLock: sync.Mutex{},
shouldRetry: shouldRetry,
expVars: expVars,
- telemetryName: telemetryName,
+ destMeta: destMeta,
isMRF: endpoint.IsMRF,
+ pipelineMonitor: pipelineMonitor,
+ utilization: pipelineMonitor.MakeUtilizationMonitor(destMeta.MonitorTag()),
}
}
@@ -175,6 +183,11 @@ func (d *Destination) Target() string {
return d.url
}
+// Metadata returns the metadata of the destination
+func (d *Destination) Metadata() *client.DestinationMetadata {
+ return d.destMeta
+}
+
// Start starts reading the input channel
func (d *Destination) Start(input chan *message.Payload, output chan *message.Payload, isRetrying chan bool) (stopChan <-chan struct{}) {
stop := make(chan struct{})
@@ -186,22 +199,25 @@ func (d *Destination) run(input chan *message.Payload, output chan *message.Payl
var startIdle = time.Now()
for p := range input {
+ d.utilization.Start()
idle := float64(time.Since(startIdle) / time.Millisecond)
d.expVars.AddFloat(expVarIdleMsMapKey, idle)
- tlmIdle.Add(idle, d.telemetryName)
+ tlmIdle.Add(idle, d.destMeta.TelemetryName())
var startInUse = time.Now()
d.sendConcurrent(p, output, isRetrying)
inUse := float64(time.Since(startInUse) / time.Millisecond)
d.expVars.AddFloat(expVarInUseMsMapKey, inUse)
- tlmInUse.Add(inUse, d.telemetryName)
+ tlmInUse.Add(inUse, d.destMeta.TelemetryName())
startIdle = time.Now()
+ d.utilization.Stop()
}
// Wait for any pending concurrent sends to finish or terminate
d.wg.Wait()
d.updateRetryState(nil, isRetrying)
+ d.utilization.Cancel()
stopChan <- struct{}{}
}
@@ -348,6 +364,7 @@ func (d *Destination) unconditionalSend(payload *message.Payload) (err error) {
// internal error. We should retry these requests.
return client.NewRetryableError(errServer)
} else {
+ d.pipelineMonitor.ReportComponentEgress(payload, d.destMeta.MonitorTag())
return nil
}
}
@@ -422,7 +439,7 @@ func getMessageTimestamp(messages []*message.Message) int64 {
func prepareCheckConnectivity(endpoint config.Endpoint, cfg pkgconfigmodel.Reader) (*client.DestinationsContext, *Destination) {
ctx := client.NewDestinationsContext()
// Lower the timeout to 5s because HTTP connectivity test is done synchronously during the agent bootstrap sequence
- destination := newDestination(endpoint, JSONContentType, ctx, time.Second*5, 0, false, "", cfg)
+ destination := newDestination(endpoint, JSONContentType, ctx, time.Second*5, 0, false, client.NewNoopDestinationMetadata(), cfg, metrics.NewNoopPipelineMonitor(""))
return ctx, destination
}
diff --git a/pkg/logs/client/http/destination_test.go b/pkg/logs/client/http/destination_test.go
index 085845ff8f2ed..6adf3e7d3148f 100644
--- a/pkg/logs/client/http/destination_test.go
+++ b/pkg/logs/client/http/destination_test.go
@@ -16,6 +16,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/logs/client"
"github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
@@ -360,7 +361,7 @@ func TestDestinationHA(t *testing.T) {
}
isEndpointMRF := endpoint.IsMRF
- dest := NewDestination(endpoint, JSONContentType, client.NewDestinationsContext(), 1, false, "test", configmock.New(t))
+ dest := NewDestination(endpoint, JSONContentType, client.NewDestinationsContext(), 1, false, client.NewNoopDestinationMetadata(), configmock.New(t), metrics.NewNoopPipelineMonitor(""))
isDestMRF := dest.IsMRF()
assert.Equal(t, isEndpointMRF, isDestMRF)
diff --git a/pkg/logs/client/http/sync_destination.go b/pkg/logs/client/http/sync_destination.go
index 62625e6da611b..ed134f6896e8c 100644
--- a/pkg/logs/client/http/sync_destination.go
+++ b/pkg/logs/client/http/sync_destination.go
@@ -30,11 +30,11 @@ func NewSyncDestination(endpoint config.Endpoint,
contentType string,
destinationsContext *client.DestinationsContext,
senderDoneChan chan *sync.WaitGroup,
- telemetryName string,
+ destMeta *client.DestinationMetadata,
cfg pkgconfigmodel.Reader) *SyncDestination {
return &SyncDestination{
- destination: newDestination(endpoint, contentType, destinationsContext, time.Second*10, 1, false, telemetryName, cfg),
+ destination: newDestination(endpoint, contentType, destinationsContext, time.Second*10, 1, false, destMeta, cfg, metrics.NewNoopPipelineMonitor("0")),
senderDoneChan: senderDoneChan,
}
}
@@ -49,6 +49,11 @@ func (d *SyncDestination) Target() string {
return d.destination.url
}
+// Metadata returns the metadata of the destination
+func (d *SyncDestination) Metadata() *client.DestinationMetadata {
+ return d.destination.destMeta
+}
+
// Start starts reading the input channel
func (d *SyncDestination) Start(input chan *message.Payload, output chan *message.Payload, _ chan bool) (stopChan <-chan struct{}) {
stop := make(chan struct{})
@@ -62,7 +67,7 @@ func (d *SyncDestination) run(input chan *message.Payload, output chan *message.
for p := range input {
idle := float64(time.Since(startIdle) / time.Millisecond)
d.destination.expVars.AddFloat(expVarIdleMsMapKey, idle)
- tlmIdle.Add(idle, d.destination.telemetryName)
+ tlmIdle.Add(idle, d.destination.destMeta.TelemetryName())
var startInUse = time.Now()
err := d.destination.unconditionalSend(p)
@@ -84,7 +89,7 @@ func (d *SyncDestination) run(input chan *message.Payload, output chan *message.
inUse := float64(time.Since(startInUse) / time.Millisecond)
d.destination.expVars.AddFloat(expVarInUseMsMapKey, inUse)
- tlmInUse.Add(inUse, d.destination.telemetryName)
+ tlmInUse.Add(inUse, d.destination.destMeta.TelemetryName())
startIdle = time.Now()
}
diff --git a/pkg/logs/client/http/test_utils.go b/pkg/logs/client/http/test_utils.go
index 98dea192077fb..c082ec06ed47a 100644
--- a/pkg/logs/client/http/test_utils.go
+++ b/pkg/logs/client/http/test_utils.go
@@ -15,6 +15,7 @@ import (
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
"github.com/DataDog/datadog-agent/pkg/logs/client"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
)
// StatusCodeContainer is a lock around the status code to return
@@ -79,7 +80,7 @@ func NewTestServerWithOptions(statusCode int, senders int, retryDestination bool
endpoint.BackoffMax = 10
endpoint.RecoveryInterval = 1
- dest := NewDestination(endpoint, JSONContentType, destCtx, senders, retryDestination, "test", cfg)
+ dest := NewDestination(endpoint, JSONContentType, destCtx, senders, retryDestination, client.NewNoopDestinationMetadata(), cfg, metrics.NewNoopPipelineMonitor(""))
return &TestServer{
httpServer: ts,
DestCtx: destCtx,
diff --git a/pkg/logs/client/tcp/destination.go b/pkg/logs/client/tcp/destination.go
index f0ec9c1520649..1934ea2b3c930 100644
--- a/pkg/logs/client/tcp/destination.go
+++ b/pkg/logs/client/tcp/destination.go
@@ -58,6 +58,11 @@ func (d *Destination) Target() string {
return d.connManager.address()
}
+// Metadata is not supported for TCP destinations
+func (d *Destination) Metadata() *client.DestinationMetadata {
+ return client.NewNoopDestinationMetadata()
+}
+
// Start reads from the input, transforms a message into a frame and sends it to a remote server,
func (d *Destination) Start(input chan *message.Payload, output chan *message.Payload, isRetrying chan bool) (stopChan <-chan struct{}) {
stop := make(chan struct{})
diff --git a/pkg/logs/diagnostic/go.mod b/pkg/logs/diagnostic/go.mod
index 3a16868bf29c8..0ea147ef97297 100644
--- a/pkg/logs/diagnostic/go.mod
+++ b/pkg/logs/diagnostic/go.mod
@@ -46,6 +46,7 @@ replace (
require (
github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0
github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3
github.com/stretchr/testify v1.9.0
@@ -58,7 +59,6 @@ require (
github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect
- github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect
github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect
github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect
diff --git a/pkg/logs/diagnostic/message_receiver.go b/pkg/logs/diagnostic/message_receiver.go
index 6a08dddc229d1..3559130757c07 100644
--- a/pkg/logs/diagnostic/message_receiver.go
+++ b/pkg/logs/diagnostic/message_receiver.go
@@ -9,7 +9,7 @@ import (
"sync"
"github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface"
- "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/logs/message"
)
@@ -49,14 +49,14 @@ func NewBufferedMessageReceiver(f Formatter, hostname hostnameinterface.Componen
}
}
return &BufferedMessageReceiver{
- inputChan: make(chan messagePair, config.ChanSize),
+ inputChan: make(chan messagePair, pkgconfigsetup.Datadog().GetInt("logs_config.message_channel_size")),
formatter: f,
}
}
// Start opens new input channel
func (b *BufferedMessageReceiver) Start() {
- b.inputChan = make(chan messagePair, config.ChanSize)
+ b.inputChan = make(chan messagePair, pkgconfigsetup.Datadog().GetInt("logs_config.message_channel_size"))
}
// Stop closes the input channel
@@ -109,7 +109,7 @@ func (b *BufferedMessageReceiver) HandleMessage(m *message.Message, rendered []b
// Filter writes the buffered events from the input channel formatted as a string to the output channel
func (b *BufferedMessageReceiver) Filter(filters *Filters, done <-chan struct{}) <-chan string {
- out := make(chan string, config.ChanSize)
+ out := make(chan string, pkgconfigsetup.Datadog().GetInt("logs_config.message_channel_size"))
go func() {
defer close(out)
for {
diff --git a/pkg/logs/internal/tag/provider_test.go b/pkg/logs/internal/tag/provider_test.go
index 9eb0b249ff68d..c0f637fa87713 100644
--- a/pkg/logs/internal/tag/provider_test.go
+++ b/pkg/logs/internal/tag/provider_test.go
@@ -13,7 +13,7 @@ import (
"github.com/benbjohnson/clock"
"github.com/stretchr/testify/require"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
@@ -23,7 +23,7 @@ import (
func TestProviderExpectedTags(t *testing.T) {
m := configmock.New(t)
clock := clock.NewMock()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
oldStartTime := pkgconfigsetup.StartTime
then := clock.Now()
diff --git a/pkg/logs/internal/util/adlistener/ad_test.go b/pkg/logs/internal/util/adlistener/ad_test.go
index 1ddbc2bf09fd4..d54417f77de84 100644
--- a/pkg/logs/internal/util/adlistener/ad_test.go
+++ b/pkg/logs/internal/util/adlistener/ad_test.go
@@ -16,8 +16,8 @@ import (
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/scheduler"
"github.com/DataDog/datadog-agent/comp/core/secrets/secretsimpl"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+
+ taggermock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
"github.com/DataDog/datadog-agent/pkg/util/fxutil"
@@ -32,8 +32,7 @@ func TestListenersGetScheduleCalls(t *testing.T) {
autodiscoveryimpl.MockModule(),
workloadmetafxmock.MockModule(workloadmeta.NewParams()),
core.MockBundle(),
- fx.Provide(taggerimpl.NewMock),
- fx.Supply(tagger.NewFakeTaggerParams()),
+ fx.Provide(taggermock.NewMock),
)
got1 := make(chan struct{}, 1)
diff --git a/pkg/logs/launchers/container/launcher.go b/pkg/logs/launchers/container/launcher.go
index 8f143f25ddc63..0fcd4ede92c1f 100644
--- a/pkg/logs/launchers/container/launcher.go
+++ b/pkg/logs/launchers/container/launcher.go
@@ -11,7 +11,7 @@ package container
import (
"context"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/logs/auditor"
"github.com/DataDog/datadog-agent/pkg/logs/launchers"
diff --git a/pkg/logs/launchers/container/launcher_nodocker.go b/pkg/logs/launchers/container/launcher_nodocker.go
index 7240c621d19e0..12831d79342c3 100644
--- a/pkg/logs/launchers/container/launcher_nodocker.go
+++ b/pkg/logs/launchers/container/launcher_nodocker.go
@@ -9,7 +9,7 @@
package container
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/logs/auditor"
"github.com/DataDog/datadog-agent/pkg/logs/launchers"
diff --git a/pkg/logs/launchers/container/launcher_test.go b/pkg/logs/launchers/container/launcher_test.go
index a4dc9760b65f7..db167deece86b 100644
--- a/pkg/logs/launchers/container/launcher_test.go
+++ b/pkg/logs/launchers/container/launcher_test.go
@@ -14,7 +14,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
"github.com/DataDog/datadog-agent/pkg/logs/auditor"
@@ -37,7 +37,7 @@ func (tf *testFactory) MakeTailer(source *sources.LogSource) (tailerfactory.Tail
}
func TestStartStop(t *testing.T) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component](), fakeTagger)
@@ -57,7 +57,7 @@ func TestStartStop(t *testing.T) {
}
func TestAddsRemovesSource(t *testing.T) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component](), fakeTagger)
l.tailerFactory = &testFactory{
@@ -88,7 +88,7 @@ func TestAddsRemovesSource(t *testing.T) {
}
func TestCannotMakeTailer(t *testing.T) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component](), fakeTagger)
l.tailerFactory = &testFactory{
@@ -111,7 +111,7 @@ func TestCannotMakeTailer(t *testing.T) {
}
func TestCannotStartTailer(t *testing.T) {
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component](), fakeTagger)
l.tailerFactory = &testFactory{
diff --git a/pkg/logs/launchers/container/tailerfactory/factory.go b/pkg/logs/launchers/container/tailerfactory/factory.go
index 07bce46bf10be..ff2e50191113c 100644
--- a/pkg/logs/launchers/container/tailerfactory/factory.go
+++ b/pkg/logs/launchers/container/tailerfactory/factory.go
@@ -10,7 +10,7 @@
package tailerfactory
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/logs/auditor"
"github.com/DataDog/datadog-agent/pkg/logs/internal/util/containersorpods"
diff --git a/pkg/logs/launchers/container/tailerfactory/tailers/socket.go b/pkg/logs/launchers/container/tailerfactory/tailers/socket.go
index 88e8a313db7f2..189823c7c62a7 100644
--- a/pkg/logs/launchers/container/tailerfactory/tailers/socket.go
+++ b/pkg/logs/launchers/container/tailerfactory/tailers/socket.go
@@ -19,7 +19,7 @@ import (
dockerutilPkg "github.com/DataDog/datadog-agent/pkg/util/docker"
"github.com/DataDog/datadog-agent/pkg/util/log"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
)
diff --git a/pkg/logs/launchers/file/launcher.go b/pkg/logs/launchers/file/launcher.go
index de04ecdbf6ec7..d2713813e7782 100644
--- a/pkg/logs/launchers/file/launcher.go
+++ b/pkg/logs/launchers/file/launcher.go
@@ -13,7 +13,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/util"
"github.com/DataDog/datadog-agent/pkg/util/log"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare"
"github.com/DataDog/datadog-agent/pkg/logs/auditor"
@@ -21,6 +21,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/logs/launchers"
fileprovider "github.com/DataDog/datadog-agent/pkg/logs/launchers/file/provider"
"github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
"github.com/DataDog/datadog-agent/pkg/logs/pipeline"
"github.com/DataDog/datadog-agent/pkg/logs/sources"
status "github.com/DataDog/datadog-agent/pkg/logs/status/utils"
@@ -311,7 +312,8 @@ func (s *Launcher) startNewTailer(file *tailer.File, m config.TailingMode) bool
return false
}
- tailer := s.createTailer(file, s.pipelineProvider.NextPipelineChan())
+ channel, monitor := s.pipelineProvider.NextPipelineChanWithMonitor()
+ tailer := s.createTailer(file, channel, monitor)
var offset int64
var whence int
@@ -382,16 +384,17 @@ func (s *Launcher) restartTailerAfterFileRotation(oldTailer *tailer.Tailer, file
}
// createTailer returns a new initialized tailer
-func (s *Launcher) createTailer(file *tailer.File, outputChan chan *message.Message) *tailer.Tailer {
+func (s *Launcher) createTailer(file *tailer.File, outputChan chan *message.Message, pipelineMonitor metrics.PipelineMonitor) *tailer.Tailer {
tailerInfo := status.NewInfoRegistry()
tailerOptions := &tailer.TailerOptions{
- OutputChan: outputChan,
- File: file,
- SleepDuration: s.tailerSleepDuration,
- Decoder: decoder.NewDecoderFromSource(file.Source, tailerInfo),
- Info: tailerInfo,
- TagAdder: s.tagger,
+ OutputChan: outputChan,
+ File: file,
+ SleepDuration: s.tailerSleepDuration,
+ Decoder: decoder.NewDecoderFromSource(file.Source, tailerInfo),
+ Info: tailerInfo,
+ TagAdder: s.tagger,
+ PipelineMonitor: pipelineMonitor,
}
return tailer.NewTailer(tailerOptions)
@@ -399,7 +402,8 @@ func (s *Launcher) createTailer(file *tailer.File, outputChan chan *message.Mess
func (s *Launcher) createRotatedTailer(t *tailer.Tailer, file *tailer.File, pattern *regexp.Regexp) *tailer.Tailer {
tailerInfo := t.GetInfo()
- return t.NewRotatedTailer(file, decoder.NewDecoderFromSourceWithPattern(file.Source, pattern, tailerInfo), tailerInfo, s.tagger)
+ channel, monitor := s.pipelineProvider.NextPipelineChanWithMonitor()
+ return t.NewRotatedTailer(file, channel, monitor, decoder.NewDecoderFromSourceWithPattern(file.Source, pattern, tailerInfo), tailerInfo, s.tagger)
}
//nolint:revive // TODO(AML) Fix revive linter
diff --git a/pkg/logs/launchers/file/launcher_test.go b/pkg/logs/launchers/file/launcher_test.go
index ca89d36dbbefc..f799d3023a0b4 100644
--- a/pkg/logs/launchers/file/launcher_test.go
+++ b/pkg/logs/launchers/file/launcher_test.go
@@ -16,8 +16,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
@@ -51,13 +50,13 @@ type LauncherTestSuite struct {
source *sources.LogSource
openFilesLimit int
s *Launcher
- tagger tagger.Mock
+ tagger taggerMock.Mock
}
func (suite *LauncherTestSuite) SetupTest() {
suite.pipelineProvider = mock.NewMockProvider()
suite.outputChan = suite.pipelineProvider.NextPipelineChan()
- suite.tagger = taggerimpl.SetupFakeTagger(suite.T())
+ suite.tagger = taggerMock.SetupFakeTagger(suite.T())
var err error
suite.testDir = suite.T().TempDir()
@@ -223,7 +222,7 @@ func TestLauncherTestSuiteWithConfigID(t *testing.T) {
func TestLauncherScanStartNewTailer(t *testing.T) {
var path string
var msg *message.Message
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
IDs := []string{"", "123456789"}
@@ -269,7 +268,7 @@ func TestLauncherScanStartNewTailer(t *testing.T) {
func TestLauncherWithConcurrentContainerTailer(t *testing.T) {
testDir := t.TempDir()
path := fmt.Sprintf("%s/container.log", testDir)
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
// create launcher
openFilesLimit := 3
@@ -318,7 +317,7 @@ func TestLauncherWithConcurrentContainerTailer(t *testing.T) {
func TestLauncherTailFromTheBeginning(t *testing.T) {
testDir := t.TempDir()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
// create launcher
openFilesLimit := 3
@@ -369,7 +368,7 @@ func TestLauncherTailFromTheBeginning(t *testing.T) {
func TestLauncherSetTail(t *testing.T) {
testDir := t.TempDir()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
path1 := fmt.Sprintf("%s/test.log", testDir)
path2 := fmt.Sprintf("%s/test2.log", testDir)
@@ -396,7 +395,7 @@ func TestLauncherSetTail(t *testing.T) {
func TestLauncherConfigIdentifier(t *testing.T) {
testDir := t.TempDir()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
path := fmt.Sprintf("%s/test.log", testDir)
os.Create(path)
@@ -421,7 +420,7 @@ func TestLauncherScanWithTooManyFiles(t *testing.T) {
var path string
testDir := t.TempDir()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
// creates files
path = fmt.Sprintf("%s/1.log", testDir)
@@ -464,7 +463,7 @@ func TestLauncherScanWithTooManyFiles(t *testing.T) {
func TestLauncherUpdatesSourceForExistingTailer(t *testing.T) {
testDir := t.TempDir()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
path := fmt.Sprintf("%s/*.log", testDir)
os.Create(path)
@@ -514,7 +513,7 @@ func TestLauncherScanRecentFilesWithRemoval(t *testing.T) {
err = os.Remove(path(name))
assert.Nil(t, err)
}
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
createLauncher := func() *Launcher {
sleepDuration := 20 * time.Millisecond
@@ -569,7 +568,7 @@ func TestLauncherScanRecentFilesWithNewFiles(t *testing.T) {
testDir := t.TempDir()
baseTime := time.Date(2010, time.August, 10, 25, 0, 0, 0, time.UTC)
openFilesLimit := 2
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
path := func(name string) string {
return fmt.Sprintf("%s/%s", testDir, name)
@@ -634,7 +633,7 @@ func TestLauncherFileRotation(t *testing.T) {
testDir := t.TempDir()
openFilesLimit := 2
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
path := func(name string) string {
return fmt.Sprintf("%s/%s", testDir, name)
@@ -700,7 +699,7 @@ func TestLauncherFileDetectionSingleScan(t *testing.T) {
testDir := t.TempDir()
openFilesLimit := 2
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
path := func(name string) string {
return fmt.Sprintf("%s/%s", testDir, name)
diff --git a/pkg/logs/launchers/journald/launcher.go b/pkg/logs/launchers/journald/launcher.go
index b094516a73c96..423810cb11fa0 100644
--- a/pkg/logs/launchers/journald/launcher.go
+++ b/pkg/logs/launchers/journald/launcher.go
@@ -13,7 +13,7 @@ import (
"github.com/coreos/go-systemd/sdjournal"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare"
"github.com/DataDog/datadog-agent/pkg/logs/auditor"
diff --git a/pkg/logs/launchers/journald/launcher_nosystemd.go b/pkg/logs/launchers/journald/launcher_nosystemd.go
index 81490ce621132..a06588e2f63e4 100644
--- a/pkg/logs/launchers/journald/launcher_nosystemd.go
+++ b/pkg/logs/launchers/journald/launcher_nosystemd.go
@@ -9,7 +9,7 @@
package journald
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare"
"github.com/DataDog/datadog-agent/pkg/logs/auditor"
"github.com/DataDog/datadog-agent/pkg/logs/launchers"
diff --git a/pkg/logs/launchers/journald/launcher_test.go b/pkg/logs/launchers/journald/launcher_test.go
index 2772be91865f9..3b5dda2231a2f 100644
--- a/pkg/logs/launchers/journald/launcher_test.go
+++ b/pkg/logs/launchers/journald/launcher_test.go
@@ -14,7 +14,7 @@ import (
"github.com/coreos/go-systemd/sdjournal"
"github.com/stretchr/testify/assert"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
"github.com/DataDog/datadog-agent/comp/logs/agent/flare"
"github.com/DataDog/datadog-agent/pkg/logs/auditor"
@@ -63,7 +63,7 @@ func (s *MockJournalFactory) NewJournalFromPath(path string) (tailer.Journal, er
func newTestLauncher(t *testing.T) *Launcher {
t.Helper()
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
launcher := NewLauncherWithFactory(&MockJournalFactory{}, flare.NewFlareController(), fakeTagger)
launcher.Start(launchers.NewMockSourceProvider(), pipeline.NewMockProvider(), auditor.New("", "registry.json", time.Hour, health.RegisterLiveness("fake")), tailers.NewTailerTracker())
diff --git a/pkg/logs/message/message.go b/pkg/logs/message/message.go
index e852a97d70ae2..0ad1f53a74b07 100644
--- a/pkg/logs/message/message.go
+++ b/pkg/logs/message/message.go
@@ -43,6 +43,20 @@ type Payload struct {
UnencodedSize int
}
+// Count returns the number of messages
+func (m *Payload) Count() int64 {
+ return int64(len(m.Messages))
+}
+
+// Size returns the size of the message.
+func (m *Payload) Size() int64 {
+ var size int64 = 0
+ for _, m := range m.Messages {
+ size += m.Size()
+ }
+ return size
+}
+
// Message represents a log line sent to datadog, with its metadata
type Message struct {
MessageContent
@@ -51,7 +65,9 @@ type Message struct {
Status string
IngestionTimestamp int64
// RawDataLen tracks the original size of the message content before any trimming/transformation.
- // This is used when calculating the tailer offset - so this will NOT always be equal to `len(Content)`.
+ // This is used when calculating the tailer offset - so this will NOT always be equal to `len(Content)`
+ // This is also used to track the original content size before the message is processed and encoded later
+ // in the pipeline.
RawDataLen int
// Tags added on processing
ProcessingTags []string
@@ -210,6 +226,7 @@ func NewMessage(content []byte, origin *Origin, status string, ingestionTimestam
},
Origin: origin,
Status: status,
+ RawDataLen: len(content),
IngestionTimestamp: ingestionTimestamp,
}
}
@@ -355,6 +372,16 @@ func (m *Message) TagsToString() string {
return m.Origin.TagsToString(m.ProcessingTags)
}
+// Count returns the number of messages
+func (m *Message) Count() int64 {
+ return 1
+}
+
+// Size returns the size of the message.
+func (m *Message) Size() int64 {
+ return int64(m.RawDataLen)
+}
+
// TruncatedReasonTag returns a tag with the reason for truncation.
func TruncatedReasonTag(reason string) string {
return fmt.Sprintf("truncated:%s", reason)
diff --git a/pkg/logs/metrics/capacity_monitor.go b/pkg/logs/metrics/capacity_monitor.go
new file mode 100644
index 0000000000000..3952a1bef9b67
--- /dev/null
+++ b/pkg/logs/metrics/capacity_monitor.go
@@ -0,0 +1,81 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package metrics
+
+import (
+ "sync"
+ "time"
+)
+
+// CapacityMonitor samples the average capacity of a component over a given interval.
+// Capacity is calculated as the difference between the ingress and egress of a payload.
+// Because data moves very quickly through components, we need to sample and aggregate this value over time.
+type CapacityMonitor struct {
+ sync.Mutex
+ ingress int64
+ ingressBytes int64
+ egress int64
+ egressBytes int64
+ avgItems float64
+ avgBytes float64
+ name string
+ instance string
+ tickChan <-chan time.Time
+}
+
+// NewCapacityMonitor creates a new CapacityMonitor
+func NewCapacityMonitor(name, instance string) *CapacityMonitor {
+ return newCapacityMonitorWithTick(name, instance, time.NewTicker(1*time.Second).C)
+}
+
+// newCapacityMonitorWithTick is used for testing.
+func newCapacityMonitorWithTick(name, instance string, tickChan <-chan time.Time) *CapacityMonitor {
+ return &CapacityMonitor{
+ name: name,
+ instance: instance,
+ avgItems: 0,
+ avgBytes: 0,
+ tickChan: tickChan,
+ }
+}
+
+// AddIngress records the ingress of a payload
+func (i *CapacityMonitor) AddIngress(pl MeasurablePayload) {
+ i.Lock()
+ defer i.Unlock()
+ i.ingress += pl.Count()
+ i.ingressBytes += pl.Size()
+ i.sample()
+}
+
+// AddEgress records the egress of a payload
+func (i *CapacityMonitor) AddEgress(pl MeasurablePayload) {
+ i.Lock()
+ defer i.Unlock()
+ i.egress += pl.Count()
+ i.egressBytes += pl.Size()
+ i.sample()
+
+}
+
+func (i *CapacityMonitor) sample() {
+ select {
+ case <-i.tickChan:
+ i.avgItems = ewma(float64(i.ingress-i.egress), i.avgItems)
+ i.avgBytes = ewma(float64(i.ingressBytes-i.egressBytes), i.avgBytes)
+ i.report()
+ default:
+ }
+}
+
+func ewma(newValue float64, oldValue float64) float64 {
+ return newValue*ewmaAlpha + (oldValue * (1 - ewmaAlpha))
+}
+
+func (i *CapacityMonitor) report() {
+ TlmUtilizationItems.Set(i.avgItems, i.name, i.instance)
+ TlmUtilizationBytes.Set(i.avgBytes, i.name, i.instance)
+}
diff --git a/pkg/logs/metrics/capacity_monitor_test.go b/pkg/logs/metrics/capacity_monitor_test.go
new file mode 100644
index 0000000000000..939383cd9bbaf
--- /dev/null
+++ b/pkg/logs/metrics/capacity_monitor_test.go
@@ -0,0 +1,56 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package metrics
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type mockPayload struct {
+ count int64
+ size int64
+}
+
+func (m mockPayload) Size() int64 {
+ return m.size
+}
+func (m mockPayload) Count() int64 {
+ return m.count
+}
+
+func TestCapacityMonitor(t *testing.T) {
+
+ tickChan := make(chan time.Time, 1)
+ m := newCapacityMonitorWithTick("test", "test", tickChan)
+
+ assert.Equal(t, m.avgItems, 0.0)
+ assert.Equal(t, m.avgBytes, 0.0)
+
+ // Tick before ingress - causing sample and flush.
+ // Should converge on 10
+ for i := 0; i < 60; i++ {
+ tickChan <- time.Now()
+ m.AddIngress(mockPayload{count: 10, size: 10})
+ m.AddEgress(mockPayload{count: 10, size: 10})
+ }
+ assert.Greater(t, m.avgItems, 9.0)
+ assert.Greater(t, m.avgBytes, 9.0)
+
+ // Tick before egress - causing sample and flush.
+ // Should converge on 0
+ for i := 0; i < 60; i++ {
+ m.AddIngress(mockPayload{count: 10, size: 10})
+ tickChan <- time.Now()
+ m.AddEgress(mockPayload{count: 10, size: 10})
+ }
+
+ assert.Less(t, m.avgItems, 1.0)
+ assert.Less(t, m.avgBytes, 1.0)
+
+}
diff --git a/pkg/logs/metrics/go.mod b/pkg/logs/metrics/go.mod
index 993d59e4c15c0..967a7650c564f 100644
--- a/pkg/logs/metrics/go.mod
+++ b/pkg/logs/metrics/go.mod
@@ -8,10 +8,12 @@ replace (
github.com/DataDog/datadog-agent/pkg/telemetry => ../../telemetry
github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil
github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ../../util/utilizationtracker
)
require (
github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0
github.com/stretchr/testify v1.9.0
)
@@ -20,6 +22,7 @@ require (
github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/optional v0.55.0 // indirect
+ github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
diff --git a/pkg/logs/metrics/go.sum b/pkg/logs/metrics/go.sum
index c930aa256d0e3..1e86541fbcdff 100644
--- a/pkg/logs/metrics/go.sum
+++ b/pkg/logs/metrics/go.sum
@@ -1,3 +1,5 @@
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
diff --git a/pkg/logs/metrics/metrics.go b/pkg/logs/metrics/metrics.go
index 49a6b30bbd597..0f8aaf35e2838 100644
--- a/pkg/logs/metrics/metrics.go
+++ b/pkg/logs/metrics/metrics.go
@@ -81,6 +81,16 @@ var (
// TlmLogsDiscardedFromSDSBuffer how many messages were dropped when waiting for an SDS configuration because the buffer is full
TlmLogsDiscardedFromSDSBuffer = telemetry.NewCounter("logs", "sds__dropped_from_buffer", nil, "Count of messages dropped from the buffer while waiting for an SDS configuration")
+
+ // TlmUtilizationRatio is the utilization ratio of a component.
+ // Utilization ratio is calculated as the ratio of time spent in use to the total time.
+ // This metric is internally sampled and exposed as an ewma in order to produce a usable value.
+ TlmUtilizationRatio = telemetry.NewGauge("logs_component_utilization", "ratio", []string{"name", "instance"}, "Gauge of the utilization ratio of a component")
+ // TlmUtilizationItems is the capacity of a component by number of elements
+ // Both the number of items and the number of bytes are aggregated and exposed as a ewma.
+ TlmUtilizationItems = telemetry.NewGauge("logs_component_utilization", "items", []string{"name", "instance"}, "Gauge of the number of items currently held in a component and its buffers")
+ // TlmUtilizationBytes is the capacity of a component by number of bytes
+ TlmUtilizationBytes = telemetry.NewGauge("logs_component_utilization", "bytes", []string{"name", "instance"}, "Gauge of the number of bytes currently held in a component and its buffers")
)
func init() {
diff --git a/pkg/logs/metrics/pipeline_monitor.go b/pkg/logs/metrics/pipeline_monitor.go
new file mode 100644
index 0000000000000..e98f23c403733
--- /dev/null
+++ b/pkg/logs/metrics/pipeline_monitor.go
@@ -0,0 +1,112 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package metrics
+
+import (
+ "sync"
+)
+
+const ewmaAlpha = 2 / (float64(30) + 1) // ~ 0.0645 for a 30s window
+
+// MeasurablePayload represents a payload that can be measured in bytes and count
+type MeasurablePayload interface {
+ Size() int64
+ Count() int64
+}
+
+// PipelineMonitor is an interface for monitoring the capacity of a pipeline.
+// Pipeline monitors are used to measure both capacity and utilization of components.
+type PipelineMonitor interface {
+ ID() string
+ ReportComponentIngress(size MeasurablePayload, name string)
+ ReportComponentEgress(size MeasurablePayload, name string)
+ MakeUtilizationMonitor(name string) UtilizationMonitor
+}
+
+// NoopPipelineMonitor is a no-op implementation of PipelineMonitor.
+// Some instances of logs components do not need to report capacity metrics and
+// should use this implementation.
+type NoopPipelineMonitor struct {
+ instanceID string
+}
+
+// NewNoopPipelineMonitor creates a new no-op pipeline monitor
+func NewNoopPipelineMonitor(id string) *NoopPipelineMonitor {
+ return &NoopPipelineMonitor{
+ instanceID: id,
+ }
+}
+
+// ID returns the instance id of the monitor
+func (n *NoopPipelineMonitor) ID() string {
+ return n.instanceID
+}
+
+// ReportComponentIngress does nothing.
+func (n *NoopPipelineMonitor) ReportComponentIngress(_ MeasurablePayload, _ string) {}
+
+// ReportComponentEgress does nothing.
+func (n *NoopPipelineMonitor) ReportComponentEgress(_ MeasurablePayload, _ string) {}
+
+// MakeUtilizationMonitor returns a no-op utilization monitor.
+func (n *NoopPipelineMonitor) MakeUtilizationMonitor(_ string) UtilizationMonitor {
+ return &NoopUtilizationMonitor{}
+}
+
+// TelemetryPipelineMonitor is a PipelineMonitor that reports capacity metrics to telemetry
+type TelemetryPipelineMonitor struct {
+ monitors map[string]*CapacityMonitor
+ instanceID string
+ lock sync.RWMutex
+}
+
+// NewTelemetryPipelineMonitor creates a new pipeline monitor that reports capacity and utilization metrics as telemetry
+func NewTelemetryPipelineMonitor(instanceID string) *TelemetryPipelineMonitor {
+ return &TelemetryPipelineMonitor{
+ monitors: make(map[string]*CapacityMonitor),
+ instanceID: instanceID,
+ lock: sync.RWMutex{},
+ }
+}
+
+func (c *TelemetryPipelineMonitor) getMonitor(name string) *CapacityMonitor {
+ key := name + c.instanceID
+
+ c.lock.RLock()
+ monitor, exists := c.monitors[key]
+ c.lock.RUnlock()
+
+ if !exists {
+ c.lock.Lock()
+ if c.monitors[key] == nil {
+ c.monitors[key] = NewCapacityMonitor(name, c.instanceID)
+ }
+ monitor = c.monitors[key]
+ c.lock.Unlock()
+ }
+
+ return monitor
+}
+
+// ID returns the instance id of the monitor
+func (c *TelemetryPipelineMonitor) ID() string {
+ return c.instanceID
+}
+
+// MakeUtilizationMonitor creates a new utilization monitor for a component.
+func (c *TelemetryPipelineMonitor) MakeUtilizationMonitor(name string) UtilizationMonitor {
+ return NewTelemetryUtilizationMonitor(name, c.instanceID)
+}
+
+// ReportComponentIngress reports the ingress of a payload to a component.
+func (c *TelemetryPipelineMonitor) ReportComponentIngress(pl MeasurablePayload, name string) {
+ c.getMonitor(name).AddIngress(pl)
+}
+
+// ReportComponentEgress reports the egress of a payload from a component.
+func (c *TelemetryPipelineMonitor) ReportComponentEgress(pl MeasurablePayload, name string) {
+ c.getMonitor(name).AddEgress(pl)
+}
diff --git a/pkg/logs/metrics/pipeline_monitor_test.go b/pkg/logs/metrics/pipeline_monitor_test.go
new file mode 100644
index 0000000000000..2f96f05c0d7be
--- /dev/null
+++ b/pkg/logs/metrics/pipeline_monitor_test.go
@@ -0,0 +1,46 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package metrics
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPipelineMonitorTracksCorrectCapacity(t *testing.T) {
+ pm := NewTelemetryPipelineMonitor("test")
+
+ pm.ReportComponentIngress(mockPayload{count: 1, size: 1}, "1")
+ pm.ReportComponentIngress(mockPayload{count: 5, size: 5}, "5")
+ pm.ReportComponentIngress(mockPayload{count: 10, size: 10}, "10")
+
+ assert.Equal(t, pm.getMonitor("1").ingress, int64(1))
+ assert.Equal(t, pm.getMonitor("1").ingressBytes, int64(1))
+
+ assert.Equal(t, pm.getMonitor("5").ingress, int64(5))
+ assert.Equal(t, pm.getMonitor("5").ingressBytes, int64(5))
+
+ assert.Equal(t, pm.getMonitor("10").ingress, int64(10))
+ assert.Equal(t, pm.getMonitor("10").ingressBytes, int64(10))
+
+ pm.ReportComponentEgress(mockPayload{count: 1, size: 1}, "1")
+ pm.ReportComponentEgress(mockPayload{count: 5, size: 5}, "5")
+ pm.ReportComponentEgress(mockPayload{count: 10, size: 10}, "10")
+
+ assert.Equal(t, pm.getMonitor("1").egress, int64(1))
+ assert.Equal(t, pm.getMonitor("1").egressBytes, int64(1))
+
+ assert.Equal(t, pm.getMonitor("5").egress, int64(5))
+ assert.Equal(t, pm.getMonitor("5").egressBytes, int64(5))
+
+ assert.Equal(t, pm.getMonitor("10").egress, int64(10))
+ assert.Equal(t, pm.getMonitor("10").egressBytes, int64(10))
+
+ assert.Equal(t, pm.getMonitor("1").ingress-pm.getMonitor("1").egress, int64(0))
+ assert.Equal(t, pm.getMonitor("5").ingress-pm.getMonitor("5").egress, int64(0))
+ assert.Equal(t, pm.getMonitor("10").ingress-pm.getMonitor("10").egress, int64(0))
+}
diff --git a/pkg/logs/metrics/utilization_monitor.go b/pkg/logs/metrics/utilization_monitor.go
new file mode 100644
index 0000000000000..704681d784f10
--- /dev/null
+++ b/pkg/logs/metrics/utilization_monitor.go
@@ -0,0 +1,113 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package metrics
+
+import (
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/util/utilizationtracker"
+)
+
+// UtilizationMonitor is an interface for monitoring the utilization of a component.
+type UtilizationMonitor interface {
+ Start()
+ Stop()
+ Cancel()
+}
+
+// NoopUtilizationMonitor is a no-op implementation of UtilizationMonitor.
+type NoopUtilizationMonitor struct{}
+
+// Start does nothing.
+func (n *NoopUtilizationMonitor) Start() {}
+
+// Stop does nothing.
+func (n *NoopUtilizationMonitor) Stop() {}
+
+// Cancel does nothing.
+func (n *NoopUtilizationMonitor) Cancel() {}
+
+// TelemetryUtilizationMonitor is a UtilizationMonitor that reports utilization metrics as telemetry.
+type TelemetryUtilizationMonitor struct {
+ name string
+ instance string
+ started bool
+ ut *utilizationtracker.UtilizationTracker
+ cancel func()
+}
+
+// NewTelemetryUtilizationMonitor creates a new TelemetryUtilizationMonitor.
+func NewTelemetryUtilizationMonitor(name, instance string) *TelemetryUtilizationMonitor {
+
+ utilizationTracker := utilizationtracker.NewUtilizationTracker(1*time.Second, ewmaAlpha)
+ cancel := startTrackerTicker(utilizationTracker, 1*time.Second)
+
+ t := &TelemetryUtilizationMonitor{
+ name: name,
+ instance: instance,
+ started: false,
+ ut: utilizationTracker,
+ cancel: cancel,
+ }
+ t.startUtilizationUpdater()
+ return t
+}
+
+// Start tracks a start event in the utilization tracker.
+func (u *TelemetryUtilizationMonitor) Start() {
+ if u.started {
+ return
+ }
+ u.started = true
+ u.ut.Started()
+}
+
+// Stop tracks a finish event in the utilization tracker.
+func (u *TelemetryUtilizationMonitor) Stop() {
+ if !u.started {
+ return
+ }
+ u.started = false
+ u.ut.Finished()
+}
+
+// Cancel stops the monitor.
+func (u *TelemetryUtilizationMonitor) Cancel() {
+ u.cancel()
+ u.ut.Stop()
+}
+
+func startTrackerTicker(ut *utilizationtracker.UtilizationTracker, interval time.Duration) func() {
+ ticker := time.NewTicker(interval)
+ cancel := make(chan struct{}, 1)
+ done := make(chan struct{})
+ go func() {
+ defer ticker.Stop()
+ defer close(done)
+ for {
+ select {
+ case <-ticker.C:
+ ut.Tick()
+ case <-cancel:
+ return
+ }
+ }
+ }()
+
+ return func() {
+ cancel <- struct{}{}
+ <-done // make sure Tick will not be called after we return.
+ }
+}
+
+func (u *TelemetryUtilizationMonitor) startUtilizationUpdater() {
+ TlmUtilizationRatio.Set(0, u.name, u.instance)
+ go func() {
+ for value := range u.ut.Output {
+ TlmUtilizationRatio.Set(value, u.name, u.instance)
+ }
+ }()
+}
diff --git a/pkg/logs/metrics/utilization_monitor_test.go b/pkg/logs/metrics/utilization_monitor_test.go
new file mode 100644
index 0000000000000..c549cfaaab55a
--- /dev/null
+++ b/pkg/logs/metrics/utilization_monitor_test.go
@@ -0,0 +1,19 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package metrics
+
+import (
+ "testing"
+)
+
+func TestUtilizationMonitorLifecycle(_ *testing.T) {
+ // The core logic of the UtilizationMonitor is tested in the utilizationtracker package.
+ // This test just ensures the lifecycle methods don't block.
+ um := NewTelemetryUtilizationMonitor("", "")
+ um.Start()
+ um.Stop()
+ um.Cancel()
+}
diff --git a/pkg/logs/pipeline/go.mod b/pkg/logs/pipeline/go.mod
index bcabc871490b5..fdb27aa451753 100644
--- a/pkg/logs/pipeline/go.mod
+++ b/pkg/logs/pipeline/go.mod
@@ -51,6 +51,7 @@ replace (
github.com/DataDog/datadog-agent/pkg/util/system => ../../util/system
github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../util/system/socket
github.com/DataDog/datadog-agent/pkg/util/testutil => ../../util/testutil
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ../../util/utilizationtracker
github.com/DataDog/datadog-agent/pkg/util/winutil => ../../util/winutil
github.com/DataDog/datadog-agent/pkg/version => ../../version
)
@@ -59,10 +60,12 @@ require (
github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/config/model v0.57.0
+ github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0
github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/logs/processor v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/logs/sds v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/logs/sender v0.56.0-rc.3
@@ -83,11 +86,9 @@ require (
github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect
- github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect
github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect
github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect
@@ -103,6 +104,7 @@ require (
github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.1 // indirect
github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
diff --git a/pkg/logs/pipeline/mock/mock.go b/pkg/logs/pipeline/mock/mock.go
index 3d07560754a79..448ea1fb2416f 100644
--- a/pkg/logs/pipeline/mock/mock.go
+++ b/pkg/logs/pipeline/mock/mock.go
@@ -10,6 +10,7 @@ import (
"context"
"github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
"github.com/DataDog/datadog-agent/pkg/logs/pipeline"
)
@@ -52,3 +53,8 @@ func (p *mockProvider) Flush(_ context.Context) {}
func (p *mockProvider) NextPipelineChan() chan *message.Message {
return p.msgChan
}
+
+// NextPipelineChanWithInstance returns the next pipeline
+func (p *mockProvider) NextPipelineChanWithMonitor() (chan *message.Message, metrics.PipelineMonitor) {
+ return p.msgChan, metrics.NewNoopPipelineMonitor("")
+}
diff --git a/pkg/logs/pipeline/pipeline.go b/pkg/logs/pipeline/pipeline.go
index 0a050d38481ad..b0136dac860d9 100644
--- a/pkg/logs/pipeline/pipeline.go
+++ b/pkg/logs/pipeline/pipeline.go
@@ -8,17 +8,19 @@ package pipeline
import (
"context"
- "fmt"
+ "strconv"
"sync"
"github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/logs/client"
"github.com/DataDog/datadog-agent/pkg/logs/client/http"
"github.com/DataDog/datadog-agent/pkg/logs/client/tcp"
"github.com/DataDog/datadog-agent/pkg/logs/diagnostic"
"github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
"github.com/DataDog/datadog-agent/pkg/logs/processor"
"github.com/DataDog/datadog-agent/pkg/logs/sender"
"github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface"
@@ -26,13 +28,14 @@ import (
// Pipeline processes and sends messages to the backend
type Pipeline struct {
- InputChan chan *message.Message
- flushChan chan struct{}
- processor *processor.Processor
- strategy sender.Strategy
- sender *sender.Sender
- serverless bool
- flushWg *sync.WaitGroup
+ InputChan chan *message.Message
+ flushChan chan struct{}
+ processor *processor.Processor
+ strategy sender.Strategy
+ sender *sender.Sender
+ serverless bool
+ flushWg *sync.WaitGroup
+ pipelineMonitor metrics.PipelineMonitor
}
// NewPipeline returns a new Pipeline
@@ -53,10 +56,11 @@ func NewPipeline(outputChan chan *message.Payload,
senderDoneChan = make(chan *sync.WaitGroup)
flushWg = &sync.WaitGroup{}
}
+ pipelineMonitor := metrics.NewTelemetryPipelineMonitor(strconv.Itoa(pipelineID))
- mainDestinations := getDestinations(endpoints, destinationsContext, pipelineID, serverless, senderDoneChan, status, cfg)
+ mainDestinations := getDestinations(endpoints, destinationsContext, pipelineMonitor, serverless, senderDoneChan, status, cfg)
- strategyInput := make(chan *message.Message, config.ChanSize)
+ strategyInput := make(chan *message.Message, pkgconfigsetup.Datadog().GetInt("logs_config.message_channel_size"))
senderInput := make(chan *message.Payload, 1) // Only buffer 1 message since payloads can be large
flushChan := make(chan struct{})
@@ -73,22 +77,23 @@ func NewPipeline(outputChan chan *message.Payload,
encoder = processor.RawEncoder
}
- strategy := getStrategy(strategyInput, senderInput, flushChan, endpoints, serverless, flushWg, pipelineID)
- logsSender = sender.NewSender(cfg, senderInput, outputChan, mainDestinations, config.DestinationPayloadChanSize, senderDoneChan, flushWg)
+ strategy := getStrategy(strategyInput, senderInput, flushChan, endpoints, serverless, flushWg, pipelineMonitor)
+ logsSender = sender.NewSender(cfg, senderInput, outputChan, mainDestinations, pkgconfigsetup.Datadog().GetInt("logs_config.payload_channel_size"), senderDoneChan, flushWg, pipelineMonitor)
- inputChan := make(chan *message.Message, config.ChanSize)
+ inputChan := make(chan *message.Message, pkgconfigsetup.Datadog().GetInt("logs_config.message_channel_size"))
processor := processor.New(cfg, inputChan, strategyInput, processingRules,
- encoder, diagnosticMessageReceiver, hostname, pipelineID)
+ encoder, diagnosticMessageReceiver, hostname, pipelineMonitor)
return &Pipeline{
- InputChan: inputChan,
- flushChan: flushChan,
- processor: processor,
- strategy: strategy,
- sender: logsSender,
- serverless: serverless,
- flushWg: flushWg,
+ InputChan: inputChan,
+ flushChan: flushChan,
+ processor: processor,
+ strategy: strategy,
+ sender: logsSender,
+ serverless: serverless,
+ flushWg: flushWg,
+ pipelineMonitor: pipelineMonitor,
}
}
@@ -117,25 +122,25 @@ func (p *Pipeline) Flush(ctx context.Context) {
}
}
-func getDestinations(endpoints *config.Endpoints, destinationsContext *client.DestinationsContext, pipelineID int, serverless bool, senderDoneChan chan *sync.WaitGroup, status statusinterface.Status, cfg pkgconfigmodel.Reader) *client.Destinations {
+func getDestinations(endpoints *config.Endpoints, destinationsContext *client.DestinationsContext, pipelineMonitor metrics.PipelineMonitor, serverless bool, senderDoneChan chan *sync.WaitGroup, status statusinterface.Status, cfg pkgconfigmodel.Reader) *client.Destinations {
reliable := []client.Destination{}
additionals := []client.Destination{}
if endpoints.UseHTTP {
for i, endpoint := range endpoints.GetReliableEndpoints() {
- telemetryName := fmt.Sprintf("logs_%d_reliable_%d", pipelineID, i)
+ destMeta := client.NewDestinationMetadata("logs", pipelineMonitor.ID(), "reliable", strconv.Itoa(i))
if serverless {
- reliable = append(reliable, http.NewSyncDestination(endpoint, http.JSONContentType, destinationsContext, senderDoneChan, telemetryName, cfg))
+ reliable = append(reliable, http.NewSyncDestination(endpoint, http.JSONContentType, destinationsContext, senderDoneChan, destMeta, cfg))
} else {
- reliable = append(reliable, http.NewDestination(endpoint, http.JSONContentType, destinationsContext, endpoints.BatchMaxConcurrentSend, true, telemetryName, cfg))
+ reliable = append(reliable, http.NewDestination(endpoint, http.JSONContentType, destinationsContext, endpoints.BatchMaxConcurrentSend, true, destMeta, cfg, pipelineMonitor))
}
}
for i, endpoint := range endpoints.GetUnReliableEndpoints() {
- telemetryName := fmt.Sprintf("logs_%d_unreliable_%d", pipelineID, i)
+ destMeta := client.NewDestinationMetadata("logs", pipelineMonitor.ID(), "unreliable", strconv.Itoa(i))
if serverless {
- additionals = append(additionals, http.NewSyncDestination(endpoint, http.JSONContentType, destinationsContext, senderDoneChan, telemetryName, cfg))
+ additionals = append(additionals, http.NewSyncDestination(endpoint, http.JSONContentType, destinationsContext, senderDoneChan, destMeta, cfg))
} else {
- additionals = append(additionals, http.NewDestination(endpoint, http.JSONContentType, destinationsContext, endpoints.BatchMaxConcurrentSend, false, telemetryName, cfg))
+ additionals = append(additionals, http.NewDestination(endpoint, http.JSONContentType, destinationsContext, endpoints.BatchMaxConcurrentSend, false, destMeta, cfg, pipelineMonitor))
}
}
return client.NewDestinations(reliable, additionals)
@@ -151,13 +156,13 @@ func getDestinations(endpoints *config.Endpoints, destinationsContext *client.De
}
//nolint:revive // TODO(AML) Fix revive linter
-func getStrategy(inputChan chan *message.Message, outputChan chan *message.Payload, flushChan chan struct{}, endpoints *config.Endpoints, serverless bool, flushWg *sync.WaitGroup, _ int) sender.Strategy {
+func getStrategy(inputChan chan *message.Message, outputChan chan *message.Payload, flushChan chan struct{}, endpoints *config.Endpoints, serverless bool, flushWg *sync.WaitGroup, pipelineMonitor metrics.PipelineMonitor) sender.Strategy {
if endpoints.UseHTTP || serverless {
encoder := sender.IdentityContentType
if endpoints.Main.UseCompression {
encoder = sender.NewGzipContentEncoding(endpoints.Main.CompressionLevel)
}
- return sender.NewBatchStrategy(inputChan, outputChan, flushChan, serverless, flushWg, sender.ArraySerializer, endpoints.BatchWait, endpoints.BatchMaxSize, endpoints.BatchMaxContentSize, "logs", encoder)
+ return sender.NewBatchStrategy(inputChan, outputChan, flushChan, serverless, flushWg, sender.ArraySerializer, endpoints.BatchWait, endpoints.BatchMaxSize, endpoints.BatchMaxContentSize, "logs", encoder, pipelineMonitor)
}
return sender.NewStreamStrategy(inputChan, outputChan, sender.IdentityContentType)
}
diff --git a/pkg/logs/pipeline/provider.go b/pkg/logs/pipeline/provider.go
index 54d3b947a1313..9ee6ec8a5dfa0 100644
--- a/pkg/logs/pipeline/provider.go
+++ b/pkg/logs/pipeline/provider.go
@@ -18,6 +18,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/logs/client"
"github.com/DataDog/datadog-agent/pkg/logs/diagnostic"
"github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
"github.com/DataDog/datadog-agent/pkg/logs/sds"
"github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface"
"github.com/DataDog/datadog-agent/pkg/util/log"
@@ -32,6 +33,7 @@ type Provider interface {
ReconfigureSDSAgentConfig(config []byte) (bool, error)
StopSDSProcessing() error
NextPipelineChan() chan *message.Message
+ NextPipelineChanWithMonitor() (chan *message.Message, metrics.PipelineMonitor)
// Flush flushes all pipeline contained in this Provider
Flush(ctx context.Context)
}
@@ -181,6 +183,17 @@ func (p *provider) NextPipelineChan() chan *message.Message {
return nextPipeline.InputChan
}
+// NextPipelineChanWithMonitor returns the next pipeline input channel with it's monitor.
+func (p *provider) NextPipelineChanWithMonitor() (chan *message.Message, metrics.PipelineMonitor) {
+ pipelinesLen := len(p.pipelines)
+ if pipelinesLen == 0 {
+ return nil, nil
+ }
+ index := p.currentPipelineIndex.Inc() % uint32(pipelinesLen)
+ nextPipeline := p.pipelines[index]
+ return nextPipeline.InputChan, nextPipeline.pipelineMonitor
+}
+
// Flush flushes synchronously all the contained pipeline of this provider.
func (p *provider) Flush(ctx context.Context) {
for _, p := range p.pipelines {
diff --git a/pkg/logs/processor/go.mod b/pkg/logs/processor/go.mod
index aed531f08a410..6822a25264eb9 100644
--- a/pkg/logs/processor/go.mod
+++ b/pkg/logs/processor/go.mod
@@ -42,6 +42,7 @@ replace (
github.com/DataDog/datadog-agent/pkg/util/system => ../../util/system
github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../util/system/socket
github.com/DataDog/datadog-agent/pkg/util/testutil => ../../util/testutil
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ../../util/utilizationtracker
github.com/DataDog/datadog-agent/pkg/util/winutil => ../../util/winutil
github.com/DataDog/datadog-agent/pkg/version => ../../version
)
@@ -83,11 +84,13 @@ require (
github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.1 // indirect
github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
github.com/DataDog/viper v1.13.5 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
diff --git a/pkg/logs/processor/go.sum b/pkg/logs/processor/go.sum
index 3bb0e66aa4a99..8cd52742efd7b 100644
--- a/pkg/logs/processor/go.sum
+++ b/pkg/logs/processor/go.sum
@@ -16,6 +16,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
diff --git a/pkg/logs/processor/processor.go b/pkg/logs/processor/processor.go
index f66a8c0c48a4d..186b022fc0572 100644
--- a/pkg/logs/processor/processor.go
+++ b/pkg/logs/processor/processor.go
@@ -26,7 +26,6 @@ const UnstructuredProcessingMetricName = "datadog.logs_agent.tailer.unstructured
// A Processor updates messages from an inputChan and pushes
// in an outputChan.
type Processor struct {
- pipelineID int
inputChan chan *message.Message
outputChan chan *message.Message // strategy input
// ReconfigChan transports rules to use in order to reconfigure
@@ -40,6 +39,10 @@ type Processor struct {
hostname hostnameinterface.Component
sds sdsProcessor
+
+ // Telemetry
+ pipelineMonitor metrics.PipelineMonitor
+ utilization metrics.UtilizationMonitor
}
type sdsProcessor struct {
@@ -58,13 +61,12 @@ type sdsProcessor struct {
// New returns an initialized Processor.
func New(cfg pkgconfigmodel.Reader, inputChan, outputChan chan *message.Message, processingRules []*config.ProcessingRule,
encoder Encoder, diagnosticMessageReceiver diagnostic.MessageReceiver, hostname hostnameinterface.Component,
- pipelineID int) *Processor {
+ pipelineMonitor metrics.PipelineMonitor) *Processor {
waitForSDSConfig := sds.ShouldBufferUntilSDSConfiguration(cfg)
maxBufferSize := sds.WaitForConfigurationBufferMaxSize(cfg)
return &Processor{
- pipelineID: pipelineID,
inputChan: inputChan,
outputChan: outputChan, // strategy input
ReconfigChan: make(chan sds.ReconfigureOrder),
@@ -73,12 +75,14 @@ func New(cfg pkgconfigmodel.Reader, inputChan, outputChan chan *message.Message,
done: make(chan struct{}),
diagnosticMessageReceiver: diagnosticMessageReceiver,
hostname: hostname,
+ pipelineMonitor: pipelineMonitor,
+ utilization: pipelineMonitor.MakeUtilizationMonitor("processor"),
sds: sdsProcessor{
// will immediately starts buffering if it has been configured as so
buffering: waitForSDSConfig,
maxBufferSize: maxBufferSize,
- scanner: sds.CreateScanner(pipelineID),
+ scanner: sds.CreateScanner(pipelineMonitor.ID()),
},
}
}
@@ -124,6 +128,7 @@ func (p *Processor) Flush(ctx context.Context) {
func (p *Processor) run() {
defer func() {
p.done <- struct{}{}
+ p.utilization.Cancel()
}()
for {
@@ -217,6 +222,9 @@ func (s *sdsProcessor) resetBuffer() {
}
func (p *Processor) processMessage(msg *message.Message) {
+ p.utilization.Start()
+ defer p.utilization.Stop()
+ defer p.pipelineMonitor.ReportComponentEgress(msg, "processor")
metrics.LogsDecoded.Add(1)
metrics.TlmLogsDecoded.Inc()
@@ -241,8 +249,11 @@ func (p *Processor) processMessage(msg *message.Message) {
return
}
+ p.utilization.Stop() // Explicitly call stop here to avoid counting writing on the output channel as processing time
p.outputChan <- msg
+ p.pipelineMonitor.ReportComponentIngress(msg, "strategy")
}
+
}
// applyRedactingRules returns given a message if we should process it or not,
diff --git a/pkg/logs/processor/processor_test.go b/pkg/logs/processor/processor_test.go
index bb2ff56b02461..236246c174c14 100644
--- a/pkg/logs/processor/processor_test.go
+++ b/pkg/logs/processor/processor_test.go
@@ -17,6 +17,7 @@ import (
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
"github.com/DataDog/datadog-agent/pkg/logs/diagnostic"
"github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
"github.com/DataDog/datadog-agent/pkg/logs/sds"
"github.com/DataDog/datadog-agent/pkg/logs/sources"
)
@@ -314,6 +315,7 @@ func TestBuffering(t *testing.T) {
}
hostnameComponent, _ := hostnameinterface.NewMock("testHostnameFromEnvVar")
+ pm := metrics.NewNoopPipelineMonitor("")
p := &Processor{
encoder: JSONEncoder,
@@ -326,8 +328,10 @@ func TestBuffering(t *testing.T) {
sds: sdsProcessor{
maxBufferSize: len("hello1world") + len("hello2world") + len("hello3world") + 1,
buffering: true,
- scanner: sds.CreateScanner(42),
+ scanner: sds.CreateScanner("42"),
},
+ pipelineMonitor: pm,
+ utilization: pm.MakeUtilizationMonitor("processor"),
}
var processedMessages atomic.Int32
diff --git a/pkg/logs/schedulers/cca/scheduler_test.go b/pkg/logs/schedulers/cca/scheduler_test.go
index 0d4c884239048..0ab1cdb18175b 100644
--- a/pkg/logs/schedulers/cca/scheduler_test.go
+++ b/pkg/logs/schedulers/cca/scheduler_test.go
@@ -16,8 +16,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core/autodiscovery"
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/autodiscoveryimpl"
"github.com/DataDog/datadog-agent/comp/core/secrets/secretsimpl"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggermock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
logsConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config"
@@ -33,8 +32,7 @@ func setup(t *testing.T) (scheduler *Scheduler, ac autodiscovery.Component, spy
autodiscoveryimpl.MockModule(),
workloadmetafxmock.MockModule(workloadmeta.NewParams()),
core.MockBundle(),
- fx.Supply(tagger.NewFakeTaggerParams()),
- fx.Provide(taggerimpl.NewMock),
+ fx.Provide(taggermock.NewMock),
)
scheduler = New(ac).(*Scheduler)
spy = &schedulers.MockSourceManager{}
diff --git a/pkg/logs/sds/scanner.go b/pkg/logs/sds/scanner.go
index 581fe810a7fbb..b0caf689efbfd 100644
--- a/pkg/logs/sds/scanner.go
+++ b/pkg/logs/sds/scanner.go
@@ -11,15 +11,15 @@ package sds
import (
"encoding/json"
"fmt"
- "strconv"
"strings"
"sync"
"time"
+ sds "github.com/DataDog/dd-sensitive-data-scanner/sds-go/go"
+
"github.com/DataDog/datadog-agent/pkg/logs/message"
"github.com/DataDog/datadog-agent/pkg/telemetry"
"github.com/DataDog/datadog-agent/pkg/util/log"
- sds "github.com/DataDog/dd-sensitive-data-scanner/sds-go/go"
)
const ScannedTag = "sds_agent:true"
@@ -34,7 +34,7 @@ var (
tlmSDSReconfigSuccess = telemetry.NewCounterWithOpts("sds", "reconfiguration_success", []string{"pipeline", "type"},
"Count of SDS reconfiguration success.", telemetry.Options{DefaultMetric: true})
tlmSDSProcessingLatency = telemetry.NewSimpleHistogram("sds", "processing_latency", "Processing latency histogram",
- []float64{10, 250, 500, 2000, 5000, 10000}) // unit: us
+ []float64{10, 250, 500, 2000, 5000, 10000}) // unit: us
)
// Scanner wraps an SDS Scanner implementation, adds reconfiguration
@@ -63,8 +63,8 @@ type Scanner struct {
// CreateScanner creates an SDS scanner.
// Use `Reconfigure` to configure it manually.
-func CreateScanner(pipelineID int) *Scanner {
- scanner := &Scanner{pipelineID: strconv.Itoa(pipelineID)}
+func CreateScanner(pipelineID string) *Scanner {
+ scanner := &Scanner{pipelineID: pipelineID}
log.Debugf("creating a new SDS scanner (internal id: %p)", scanner)
return scanner
}
diff --git a/pkg/logs/sds/scanner_nosds.go b/pkg/logs/sds/scanner_nosds.go
index 0f1d256f6917a..c1db02cdea4b7 100644
--- a/pkg/logs/sds/scanner_nosds.go
+++ b/pkg/logs/sds/scanner_nosds.go
@@ -24,7 +24,7 @@ type Match struct {
}
// CreateScanner creates a scanner for unsupported platforms/architectures.
-func CreateScanner(_ int) *Scanner {
+func CreateScanner(_ string) *Scanner {
return nil
}
diff --git a/pkg/logs/sds/scanner_test.go b/pkg/logs/sds/scanner_test.go
index bf27ea97ae8e0..4e099d2aec7cb 100644
--- a/pkg/logs/sds/scanner_test.go
+++ b/pkg/logs/sds/scanner_test.go
@@ -13,9 +13,10 @@ import (
"testing"
"time"
- "github.com/DataDog/datadog-agent/pkg/logs/message"
sds "github.com/DataDog/dd-sensitive-data-scanner/sds-go/go"
"github.com/stretchr/testify/require"
+
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
)
func TestCreateScanner(t *testing.T) {
@@ -68,7 +69,7 @@ func TestCreateScanner(t *testing.T) {
// scanner creation
// -----
- s := CreateScanner(0)
+ s := CreateScanner("")
require.NotNil(s, "the scanner should not be nil after a creation")
@@ -245,7 +246,7 @@ func TestEmptyConfiguration(t *testing.T) {
]}
`)
- s := CreateScanner(0)
+ s := CreateScanner("")
require.NotNil(s, "the scanner should not be nil after a creation")
@@ -350,7 +351,7 @@ func TestIsReady(t *testing.T) {
// scanner creation
// -----
- s := CreateScanner(0)
+ s := CreateScanner("")
require.NotNil(s, "the scanner should not be nil after a creation")
require.False(s.IsReady(), "at this stage, the scanner should not be considered ready, no definitions received")
@@ -420,7 +421,7 @@ func TestScan(t *testing.T) {
// scanner creation
// -----
- s := CreateScanner(0)
+ s := CreateScanner("")
require.NotNil(s, "the returned scanner should not be nil")
isActive, _ := s.Reconfigure(ReconfigureOrder{
@@ -509,7 +510,7 @@ func TestCloseCycleScan(t *testing.T) {
// -----
for i := 0; i < 10; i++ {
- s := CreateScanner(0)
+ s := CreateScanner("")
require.NotNil(s, "the returned scanner should not be nil")
_, _ = s.Reconfigure(ReconfigureOrder{
diff --git a/pkg/logs/sender/batch_strategy.go b/pkg/logs/sender/batch_strategy.go
index 4949f4a4e708f..cfb2ef8655d82 100644
--- a/pkg/logs/sender/batch_strategy.go
+++ b/pkg/logs/sender/batch_strategy.go
@@ -13,6 +13,7 @@ import (
"github.com/benbjohnson/clock"
"github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
"github.com/DataDog/datadog-agent/pkg/telemetry"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
@@ -36,6 +37,10 @@ type batchStrategy struct {
contentEncoding ContentEncoding
stopChan chan struct{} // closed when the goroutine has finished
clock clock.Clock
+
+ // Telemtry
+ pipelineMonitor metrics.PipelineMonitor
+ utilization metrics.UtilizationMonitor
}
// NewBatchStrategy returns a new batch concurrent strategy with the specified batch & content size limits
@@ -49,8 +54,9 @@ func NewBatchStrategy(inputChan chan *message.Message,
maxBatchSize int,
maxContentSize int,
pipelineName string,
- contentEncoding ContentEncoding) Strategy {
- return newBatchStrategyWithClock(inputChan, outputChan, flushChan, serverless, flushWg, serializer, batchWait, maxBatchSize, maxContentSize, pipelineName, clock.New(), contentEncoding)
+ contentEncoding ContentEncoding,
+ pipelineMonitor metrics.PipelineMonitor) Strategy {
+ return newBatchStrategyWithClock(inputChan, outputChan, flushChan, serverless, flushWg, serializer, batchWait, maxBatchSize, maxContentSize, pipelineName, clock.New(), contentEncoding, pipelineMonitor)
}
func newBatchStrategyWithClock(inputChan chan *message.Message,
@@ -64,7 +70,8 @@ func newBatchStrategyWithClock(inputChan chan *message.Message,
maxContentSize int,
pipelineName string,
clock clock.Clock,
- contentEncoding ContentEncoding) Strategy {
+ contentEncoding ContentEncoding,
+ pipelineMonitor metrics.PipelineMonitor) Strategy {
return &batchStrategy{
inputChan: inputChan,
@@ -79,6 +86,8 @@ func newBatchStrategyWithClock(inputChan chan *message.Message,
stopChan: make(chan struct{}),
pipelineName: pipelineName,
clock: clock,
+ pipelineMonitor: pipelineMonitor,
+ utilization: pipelineMonitor.MakeUtilizationMonitor("strategy"),
}
}
@@ -98,6 +107,7 @@ func (s *batchStrategy) Start() {
defer func() {
s.flushBuffer(s.outputChan)
flushTicker.Stop()
+ s.utilization.Cancel()
close(s.stopChan)
}()
for {
@@ -144,6 +154,7 @@ func (s *batchStrategy) flushBuffer(outputChan chan *message.Payload) {
if s.buffer.IsEmpty() {
return
}
+ s.utilization.Start()
messages := s.buffer.GetMessages()
s.buffer.Clear()
// Logging specifically for DBM pipelines, which seem to fail to send more often than other pipelines.
@@ -161,6 +172,7 @@ func (s *batchStrategy) sendMessages(messages []*message.Message, outputChan cha
encodedPayload, err := s.contentEncoding.encode(serializedMessage)
if err != nil {
log.Warn("Encoding failed - dropping payload", err)
+ s.utilization.Stop()
return
}
@@ -169,10 +181,14 @@ func (s *batchStrategy) sendMessages(messages []*message.Message, outputChan cha
s.flushWg.Add(1)
}
- outputChan <- &message.Payload{
+ p := &message.Payload{
Messages: messages,
Encoded: encodedPayload,
Encoding: s.contentEncoding.name(),
UnencodedSize: len(serializedMessage),
}
+ s.utilization.Stop()
+ outputChan <- p
+ s.pipelineMonitor.ReportComponentEgress(p, "strategy")
+ s.pipelineMonitor.ReportComponentIngress(p, "sender")
}
diff --git a/pkg/logs/sender/batch_strategy_test.go b/pkg/logs/sender/batch_strategy_test.go
index ff1f6bae1b107..34cb6be7aa4e9 100644
--- a/pkg/logs/sender/batch_strategy_test.go
+++ b/pkg/logs/sender/batch_strategy_test.go
@@ -13,6 +13,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
)
func TestBatchStrategySendsPayloadWhenBufferIsFull(t *testing.T) {
@@ -20,7 +21,7 @@ func TestBatchStrategySendsPayloadWhenBufferIsFull(t *testing.T) {
output := make(chan *message.Payload)
flushChan := make(chan struct{})
- s := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, 100*time.Millisecond, 2, 2, "test", &identityContentType{})
+ s := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, 100*time.Millisecond, 2, 2, "test", &identityContentType{}, metrics.NewNoopPipelineMonitor(""))
s.Start()
message1 := message.NewMessage([]byte("a"), nil, "", 0)
@@ -52,7 +53,7 @@ func TestBatchStrategySendsPayloadWhenBufferIsOutdated(t *testing.T) {
timerInterval := 100 * time.Millisecond
clk := clock.NewMock()
- s := newBatchStrategyWithClock(input, output, flushChan, false, nil, LineSerializer, timerInterval, 100, 100, "test", clk, &identityContentType{})
+ s := newBatchStrategyWithClock(input, output, flushChan, false, nil, LineSerializer, timerInterval, 100, 100, "test", clk, &identityContentType{}, metrics.NewNoopPipelineMonitor(""))
s.Start()
for round := 0; round < 3; round++ {
@@ -77,7 +78,7 @@ func TestBatchStrategySendsPayloadWhenClosingInput(t *testing.T) {
flushChan := make(chan struct{})
clk := clock.NewMock()
- s := newBatchStrategyWithClock(input, output, flushChan, false, nil, LineSerializer, 100*time.Millisecond, 2, 2, "test", clk, &identityContentType{})
+ s := newBatchStrategyWithClock(input, output, flushChan, false, nil, LineSerializer, 100*time.Millisecond, 2, 2, "test", clk, &identityContentType{}, metrics.NewNoopPipelineMonitor(""))
s.Start()
message := message.NewMessage([]byte("a"), nil, "", 0)
@@ -102,7 +103,7 @@ func TestBatchStrategyShouldNotBlockWhenStoppingGracefully(t *testing.T) {
output := make(chan *message.Payload)
flushChan := make(chan struct{})
- s := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, 100*time.Millisecond, 2, 2, "test", &identityContentType{})
+ s := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, 100*time.Millisecond, 2, 2, "test", &identityContentType{}, metrics.NewNoopPipelineMonitor(""))
s.Start()
message := message.NewMessage([]byte{}, nil, "", 0)
@@ -126,7 +127,7 @@ func TestBatchStrategySynchronousFlush(t *testing.T) {
// batch size is large so it will not flush until we trigger it manually
// flush time is large so it won't automatically trigger during this test
- strategy := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, time.Hour, 100, 100, "test", &identityContentType{})
+ strategy := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, time.Hour, 100, 100, "test", &identityContentType{}, metrics.NewNoopPipelineMonitor(""))
strategy.Start()
// all of these messages will get buffered
@@ -171,7 +172,7 @@ func TestBatchStrategyFlushChannel(t *testing.T) {
// batch size is large so it will not flush until we trigger it manually
// flush time is large so it won't automatically trigger during this test
- strategy := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, time.Hour, 100, 100, "test", &identityContentType{})
+ strategy := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, time.Hour, 100, 100, "test", &identityContentType{}, metrics.NewNoopPipelineMonitor(""))
strategy.Start()
// all of these messages will get buffered
diff --git a/pkg/logs/sender/destination_sender_test.go b/pkg/logs/sender/destination_sender_test.go
index d2ab54715a4f0..3aa930e437e54 100644
--- a/pkg/logs/sender/destination_sender_test.go
+++ b/pkg/logs/sender/destination_sender_test.go
@@ -13,6 +13,7 @@ import (
configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/logs/client"
"github.com/DataDog/datadog-agent/pkg/logs/message"
)
@@ -32,6 +33,10 @@ func (m *mockDestination) Target() string {
return "mock-dest"
}
+func (m *mockDestination) Metadata() *client.DestinationMetadata {
+ return client.NewNoopDestinationMetadata()
+}
+
func (m *mockDestination) Start(input chan *message.Payload, output chan *message.Payload, isRetrying chan bool) (stopChan <-chan struct{}) {
m.input = input
m.output = output
diff --git a/pkg/logs/sender/go.mod b/pkg/logs/sender/go.mod
index 750b501605d92..8a1b84cfc1339 100644
--- a/pkg/logs/sender/go.mod
+++ b/pkg/logs/sender/go.mod
@@ -44,6 +44,7 @@ replace (
github.com/DataDog/datadog-agent/pkg/util/system => ../../util/system
github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../util/system/socket
github.com/DataDog/datadog-agent/pkg/util/testutil => ../../util/testutil
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ../../util/utilizationtracker
github.com/DataDog/datadog-agent/pkg/util/winutil => ../../util/winutil
github.com/DataDog/datadog-agent/pkg/version => ../../version
)
@@ -54,6 +55,7 @@ require (
github.com/DataDog/datadog-agent/pkg/config/model v0.57.0
github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3
@@ -73,7 +75,6 @@ require (
github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect
github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect
github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/executable v0.57.1 // indirect
@@ -87,6 +88,7 @@ require (
github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.1 // indirect
github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect
github.com/DataDog/viper v1.13.5 // indirect
diff --git a/pkg/logs/sender/sender.go b/pkg/logs/sender/sender.go
index 31fa4db0bb382..48e9c6b22d936 100644
--- a/pkg/logs/sender/sender.go
+++ b/pkg/logs/sender/sender.go
@@ -13,6 +13,7 @@ import (
pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
"github.com/DataDog/datadog-agent/pkg/logs/client"
"github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
"github.com/DataDog/datadog-agent/pkg/telemetry"
)
@@ -38,10 +39,13 @@ type Sender struct {
bufferSize int
senderDoneChan chan *sync.WaitGroup
flushWg *sync.WaitGroup
+
+ pipelineMonitor metrics.PipelineMonitor
+ utilization metrics.UtilizationMonitor
}
// NewSender returns a new sender.
-func NewSender(config pkgconfigmodel.Reader, inputChan chan *message.Payload, outputChan chan *message.Payload, destinations *client.Destinations, bufferSize int, senderDoneChan chan *sync.WaitGroup, flushWg *sync.WaitGroup) *Sender {
+func NewSender(config pkgconfigmodel.Reader, inputChan chan *message.Payload, outputChan chan *message.Payload, destinations *client.Destinations, bufferSize int, senderDoneChan chan *sync.WaitGroup, flushWg *sync.WaitGroup, pipelineMonitor metrics.PipelineMonitor) *Sender {
return &Sender{
config: config,
inputChan: inputChan,
@@ -51,6 +55,10 @@ func NewSender(config pkgconfigmodel.Reader, inputChan chan *message.Payload, ou
bufferSize: bufferSize,
senderDoneChan: senderDoneChan,
flushWg: flushWg,
+
+ // Telemetry
+ pipelineMonitor: pipelineMonitor,
+ utilization: pipelineMonitor.MakeUtilizationMonitor("sender"),
}
}
@@ -73,6 +81,7 @@ func (s *Sender) run() {
unreliableDestinations := buildDestinationSenders(s.config, s.destinations.Unreliable, sink, s.bufferSize)
for payload := range s.inputChan {
+ s.utilization.Start()
var startInUse = time.Now()
senderDoneWg := &sync.WaitGroup{}
@@ -80,6 +89,9 @@ func (s *Sender) run() {
for !sent {
for _, destSender := range reliableDestinations {
if destSender.Send(payload) {
+ if destSender.destination.Metadata().ReportingEnabled {
+ s.pipelineMonitor.ReportComponentIngress(payload, destSender.destination.Metadata().MonitorTag())
+ }
sent = true
if s.senderDoneChan != nil {
senderDoneWg.Add(1)
@@ -121,6 +133,7 @@ func (s *Sender) run() {
inUse := float64(time.Since(startInUse) / time.Millisecond)
tlmSendWaitTime.Add(inUse)
+ s.utilization.Stop()
if s.senderDoneChan != nil && s.flushWg != nil {
// Wait for all destinations to finish sending the payload
@@ -128,6 +141,7 @@ func (s *Sender) run() {
// Decrement the wait group when this payload has been sent
s.flushWg.Done()
}
+ s.pipelineMonitor.ReportComponentEgress(payload, "sender")
}
// Cleanup the destinations
@@ -138,6 +152,7 @@ func (s *Sender) run() {
destSender.Stop()
}
close(sink)
+ s.utilization.Cancel()
s.done <- struct{}{}
}
diff --git a/pkg/logs/sender/sender_test.go b/pkg/logs/sender/sender_test.go
index 5fd09caf501d4..4f35558a46974 100644
--- a/pkg/logs/sender/sender_test.go
+++ b/pkg/logs/sender/sender_test.go
@@ -17,6 +17,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/logs/client/mock"
"github.com/DataDog/datadog-agent/pkg/logs/client/tcp"
"github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
"github.com/DataDog/datadog-agent/pkg/logs/sources"
"github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface"
)
@@ -45,7 +46,7 @@ func TestSender(t *testing.T) {
destinations := client.NewDestinations([]client.Destination{destination}, nil)
cfg := configmock.New(t)
- sender := NewSender(cfg, input, output, destinations, 0, nil, nil)
+ sender := NewSender(cfg, input, output, destinations, 0, nil, nil, metrics.NewNoopPipelineMonitor(""))
sender.Start()
expectedMessage := newMessage([]byte("fake line"), source, "")
@@ -73,7 +74,7 @@ func TestSenderSingleDestination(t *testing.T) {
destinations := client.NewDestinations([]client.Destination{server.Destination}, nil)
- sender := NewSender(cfg, input, output, destinations, 10, nil, nil)
+ sender := NewSender(cfg, input, output, destinations, 10, nil, nil, metrics.NewNoopPipelineMonitor(""))
sender.Start()
input <- &message.Payload{}
@@ -103,7 +104,7 @@ func TestSenderDualReliableDestination(t *testing.T) {
destinations := client.NewDestinations([]client.Destination{server1.Destination, server2.Destination}, nil)
- sender := NewSender(cfg, input, output, destinations, 10, nil, nil)
+ sender := NewSender(cfg, input, output, destinations, 10, nil, nil, metrics.NewNoopPipelineMonitor(""))
sender.Start()
input <- &message.Payload{}
@@ -138,7 +139,7 @@ func TestSenderUnreliableAdditionalDestination(t *testing.T) {
destinations := client.NewDestinations([]client.Destination{server1.Destination}, []client.Destination{server2.Destination})
- sender := NewSender(cfg, input, output, destinations, 10, nil, nil)
+ sender := NewSender(cfg, input, output, destinations, 10, nil, nil, metrics.NewNoopPipelineMonitor(""))
sender.Start()
input <- &message.Payload{}
@@ -170,7 +171,7 @@ func TestSenderUnreliableStopsWhenMainFails(t *testing.T) {
destinations := client.NewDestinations([]client.Destination{reliableServer.Destination}, []client.Destination{unreliableServer.Destination})
- sender := NewSender(cfg, input, output, destinations, 10, nil, nil)
+ sender := NewSender(cfg, input, output, destinations, 10, nil, nil, metrics.NewNoopPipelineMonitor(""))
sender.Start()
input <- &message.Payload{}
@@ -219,7 +220,7 @@ func TestSenderReliableContinuseWhenOneFails(t *testing.T) {
destinations := client.NewDestinations([]client.Destination{reliableServer1.Destination, reliableServer2.Destination}, nil)
- sender := NewSender(cfg, input, output, destinations, 10, nil, nil)
+ sender := NewSender(cfg, input, output, destinations, 10, nil, nil, metrics.NewNoopPipelineMonitor(""))
sender.Start()
input <- &message.Payload{}
@@ -265,7 +266,7 @@ func TestSenderReliableWhenOneFailsAndRecovers(t *testing.T) {
destinations := client.NewDestinations([]client.Destination{reliableServer1.Destination, reliableServer2.Destination}, nil)
- sender := NewSender(cfg, input, output, destinations, 10, nil, nil)
+ sender := NewSender(cfg, input, output, destinations, 10, nil, nil, metrics.NewNoopPipelineMonitor(""))
sender.Start()
input <- &message.Payload{}
diff --git a/pkg/logs/tailers/docker/tailer.go b/pkg/logs/tailers/docker/tailer.go
index 7eb2b19b6ee64..e7766030bd8e6 100644
--- a/pkg/logs/tailers/docker/tailer.go
+++ b/pkg/logs/tailers/docker/tailer.go
@@ -15,7 +15,7 @@ import (
"sync"
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
"github.com/DataDog/datadog-agent/pkg/logs/internal/decoder"
diff --git a/pkg/logs/tailers/file/tailer.go b/pkg/logs/tailers/file/tailer.go
index b9d96b982fcf8..c3f5db51a7d3d 100644
--- a/pkg/logs/tailers/file/tailer.go
+++ b/pkg/logs/tailers/file/tailer.go
@@ -116,20 +116,22 @@ type Tailer struct {
// blocked sending to the tailer's outputChan.
stopForward context.CancelFunc
- info *status.InfoRegistry
- bytesRead *status.CountInfo
- movingSum *util.MovingSum
+ info *status.InfoRegistry
+ bytesRead *status.CountInfo
+ movingSum *util.MovingSum
+ PipelineMonitor metrics.PipelineMonitor
}
// TailerOptions holds all possible parameters that NewTailer requires in addition to optional parameters that can be optionally passed into. This can be used for more optional parameters if required in future
type TailerOptions struct {
- OutputChan chan *message.Message // Required
- File *File // Required
- SleepDuration time.Duration // Required
- Decoder *decoder.Decoder // Required
- Info *status.InfoRegistry // Required
- Rotated bool // Optional
- TagAdder tag.EntityTagAdder // Required
+ OutputChan chan *message.Message // Required
+ File *File // Required
+ SleepDuration time.Duration // Required
+ Decoder *decoder.Decoder // Required
+ Info *status.InfoRegistry // Required
+ Rotated bool // Optional
+ TagAdder tag.EntityTagAdder // Required
+ PipelineMonitor metrics.PipelineMonitor // Required
}
// NewTailer returns an initialized Tailer, read to be started.
@@ -182,6 +184,7 @@ func NewTailer(opts *TailerOptions) *Tailer {
info: opts.Info,
bytesRead: bytesRead,
movingSum: movingSum,
+ PipelineMonitor: opts.PipelineMonitor,
}
if fileRotated {
@@ -199,16 +202,17 @@ func addToTailerInfo(k, m string, tailerInfo *status.InfoRegistry) {
}
// NewRotatedTailer creates a new tailer that replaces this one, writing
-// messages to the same channel but using an updated file and decoder.
-func (t *Tailer) NewRotatedTailer(file *File, decoder *decoder.Decoder, info *status.InfoRegistry, tagAdder tag.EntityTagAdder) *Tailer {
+// messages to a new channel and using an updated file and decoder.
+func (t *Tailer) NewRotatedTailer(file *File, outputChan chan *message.Message, pipelineMonitor metrics.PipelineMonitor, decoder *decoder.Decoder, info *status.InfoRegistry, tagAdder tag.EntityTagAdder) *Tailer {
options := &TailerOptions{
- OutputChan: t.outputChan,
- File: file,
- SleepDuration: t.sleepDuration,
- Decoder: decoder,
- Info: info,
- Rotated: true,
- TagAdder: tagAdder,
+ OutputChan: outputChan,
+ File: file,
+ SleepDuration: t.sleepDuration,
+ Decoder: decoder,
+ Info: info,
+ Rotated: true,
+ TagAdder: tagAdder,
+ PipelineMonitor: pipelineMonitor,
}
return NewTailer(options)
@@ -359,13 +363,15 @@ func (t *Tailer) forwardMessages() {
if len(output.GetContent()) == 0 {
continue
}
+
+ msg := message.NewMessage(output.GetContent(), origin, output.Status, output.IngestionTimestamp)
// Make the write to the output chan cancellable to be able to stop the tailer
// after a file rotation when it is stuck on it.
// We don't return directly to keep the same shutdown sequence that in the
// normal case.
select {
- // XXX(remy): is it ok recreating a message like this here?
- case t.outputChan <- message.NewMessage(output.GetContent(), origin, output.Status, output.IngestionTimestamp):
+ case t.outputChan <- msg:
+ t.PipelineMonitor.ReportComponentIngress(msg, "processor")
case <-t.forwardContext.Done():
}
}
diff --git a/pkg/logs/tailers/file/tailer_nix.go b/pkg/logs/tailers/file/tailer_nix.go
index a4af026781133..681396e03fbca 100644
--- a/pkg/logs/tailers/file/tailer_nix.go
+++ b/pkg/logs/tailers/file/tailer_nix.go
@@ -56,6 +56,7 @@ func (t *Tailer) read() (int, error) {
return 0, nil
}
t.lastReadOffset.Add(int64(n))
- t.decoder.InputChan <- decoder.NewInput(inBuf[:n])
+ msg := decoder.NewInput(inBuf[:n])
+ t.decoder.InputChan <- msg
return n, nil
}
diff --git a/pkg/logs/tailers/file/tailer_test.go b/pkg/logs/tailers/file/tailer_test.go
index d26c17d25224c..794a6df4557f9 100644
--- a/pkg/logs/tailers/file/tailer_test.go
+++ b/pkg/logs/tailers/file/tailer_test.go
@@ -21,6 +21,7 @@ import (
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/logs/internal/decoder"
"github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
"github.com/DataDog/datadog-agent/pkg/logs/sources"
status "github.com/DataDog/datadog-agent/pkg/logs/status/utils"
)
@@ -57,11 +58,12 @@ func (suite *TailerTestSuite) SetupTest() {
info := status.NewInfoRegistry()
tailerOptions := &TailerOptions{
- OutputChan: suite.outputChan,
- File: NewFile(suite.testPath, suite.source.UnderlyingSource(), false),
- SleepDuration: sleepDuration,
- Decoder: decoder.NewDecoderFromSource(suite.source, info),
- Info: info,
+ OutputChan: suite.outputChan,
+ File: NewFile(suite.testPath, suite.source.UnderlyingSource(), false),
+ SleepDuration: sleepDuration,
+ Decoder: decoder.NewDecoderFromSource(suite.source, info),
+ Info: info,
+ PipelineMonitor: metrics.NewNoopPipelineMonitor(""),
}
suite.tailer = NewTailer(tailerOptions)
@@ -111,11 +113,12 @@ func (suite *TailerTestSuite) TestTailerTimeDurationConfig() {
info := status.NewInfoRegistry()
tailerOptions := &TailerOptions{
- OutputChan: suite.outputChan,
- File: NewFile(suite.testPath, suite.source.UnderlyingSource(), false),
- SleepDuration: sleepDuration,
- Decoder: decoder.NewDecoderFromSource(suite.source, info),
- Info: info,
+ OutputChan: suite.outputChan,
+ File: NewFile(suite.testPath, suite.source.UnderlyingSource(), false),
+ SleepDuration: sleepDuration,
+ Decoder: decoder.NewDecoderFromSource(suite.source, info),
+ Info: info,
+ PipelineMonitor: metrics.NewNoopPipelineMonitor(""),
}
tailer := NewTailer(tailerOptions)
@@ -278,11 +281,12 @@ func (suite *TailerTestSuite) TestDirTagWhenTailingFiles() {
info := status.NewInfoRegistry()
tailerOptions := &TailerOptions{
- OutputChan: suite.outputChan,
- File: NewFile(suite.testPath, dirTaggedSource, true),
- SleepDuration: sleepDuration,
- Decoder: decoder.NewDecoderFromSource(suite.source, info),
- Info: info,
+ OutputChan: suite.outputChan,
+ File: NewFile(suite.testPath, dirTaggedSource, true),
+ SleepDuration: sleepDuration,
+ Decoder: decoder.NewDecoderFromSource(suite.source, info),
+ Info: info,
+ PipelineMonitor: metrics.NewNoopPipelineMonitor(""),
}
suite.tailer = NewTailer(tailerOptions)
@@ -308,11 +312,12 @@ func (suite *TailerTestSuite) TestBuildTagsFileOnly() {
info := status.NewInfoRegistry()
tailerOptions := &TailerOptions{
- OutputChan: suite.outputChan,
- File: NewFile(suite.testPath, dirTaggedSource, false),
- SleepDuration: sleepDuration,
- Decoder: decoder.NewDecoderFromSource(suite.source, info),
- Info: info,
+ OutputChan: suite.outputChan,
+ File: NewFile(suite.testPath, dirTaggedSource, false),
+ SleepDuration: sleepDuration,
+ Decoder: decoder.NewDecoderFromSource(suite.source, info),
+ Info: info,
+ PipelineMonitor: metrics.NewNoopPipelineMonitor(""),
}
suite.tailer = NewTailer(tailerOptions)
@@ -335,11 +340,12 @@ func (suite *TailerTestSuite) TestBuildTagsFileDir() {
info := status.NewInfoRegistry()
tailerOptions := &TailerOptions{
- OutputChan: suite.outputChan,
- File: NewFile(suite.testPath, dirTaggedSource, true),
- SleepDuration: sleepDuration,
- Decoder: decoder.NewDecoderFromSource(suite.source, info),
- Info: info,
+ OutputChan: suite.outputChan,
+ File: NewFile(suite.testPath, dirTaggedSource, true),
+ SleepDuration: sleepDuration,
+ Decoder: decoder.NewDecoderFromSource(suite.source, info),
+ Info: info,
+ PipelineMonitor: metrics.NewNoopPipelineMonitor(""),
}
suite.tailer = NewTailer(tailerOptions)
@@ -366,11 +372,12 @@ func (suite *TailerTestSuite) TestTruncatedTag() {
info := status.NewInfoRegistry()
tailerOptions := &TailerOptions{
- OutputChan: suite.outputChan,
- File: NewFile(suite.testPath, source, true),
- SleepDuration: sleepDuration,
- Decoder: decoder.NewDecoderFromSource(suite.source, info),
- Info: info,
+ OutputChan: suite.outputChan,
+ File: NewFile(suite.testPath, source, true),
+ SleepDuration: sleepDuration,
+ Decoder: decoder.NewDecoderFromSource(suite.source, info),
+ Info: info,
+ PipelineMonitor: metrics.NewNoopPipelineMonitor(""),
}
suite.tailer = NewTailer(tailerOptions)
@@ -398,11 +405,12 @@ func (suite *TailerTestSuite) TestMutliLineAutoDetect() {
info := status.NewInfoRegistry()
tailerOptions := &TailerOptions{
- OutputChan: suite.outputChan,
- File: NewFile(suite.testPath, suite.source.UnderlyingSource(), true),
- SleepDuration: sleepDuration,
- Decoder: decoder.NewDecoderFromSource(suite.source, info),
- Info: info,
+ OutputChan: suite.outputChan,
+ File: NewFile(suite.testPath, suite.source.UnderlyingSource(), true),
+ SleepDuration: sleepDuration,
+ Decoder: decoder.NewDecoderFromSource(suite.source, info),
+ Info: info,
+ PipelineMonitor: metrics.NewNoopPipelineMonitor(""),
}
suite.tailer = NewTailer(tailerOptions)
@@ -433,11 +441,12 @@ func (suite *TailerTestSuite) TestDidRotateNilFullpath() {
info := status.NewInfoRegistry()
tailerOptions := &TailerOptions{
- OutputChan: suite.outputChan,
- File: NewFile(suite.testPath, suite.source.UnderlyingSource(), false),
- SleepDuration: sleepDuration,
- Decoder: decoder.NewDecoderFromSource(suite.source, info),
- Info: info,
+ OutputChan: suite.outputChan,
+ File: NewFile(suite.testPath, suite.source.UnderlyingSource(), false),
+ SleepDuration: sleepDuration,
+ Decoder: decoder.NewDecoderFromSource(suite.source, info),
+ Info: info,
+ PipelineMonitor: metrics.NewNoopPipelineMonitor(""),
}
tailer := NewTailer(tailerOptions)
diff --git a/pkg/logs/tailers/journald/docker_test.go b/pkg/logs/tailers/journald/docker_test.go
index 296fe5a88c859..1e2126ab9f5ca 100644
--- a/pkg/logs/tailers/journald/docker_test.go
+++ b/pkg/logs/tailers/journald/docker_test.go
@@ -13,14 +13,14 @@ import (
"github.com/coreos/go-systemd/sdjournal"
"github.com/stretchr/testify/assert"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
"github.com/DataDog/datadog-agent/pkg/logs/sources"
)
func TestIsContainerEntry(t *testing.T) {
source := sources.NewLogSource("", &config.LogsConfig{})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailer := NewTailer(source, nil, nil, false, fakeTagger)
var entry *sdjournal.JournalEntry
@@ -38,7 +38,7 @@ func TestIsContainerEntry(t *testing.T) {
func TestGetContainerID(t *testing.T) {
source := sources.NewLogSource("", &config.LogsConfig{})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailer := NewTailer(source, nil, nil, false, fakeTagger)
entry := &sdjournal.JournalEntry{
diff --git a/pkg/logs/tailers/journald/tailer.go b/pkg/logs/tailers/journald/tailer.go
index b2009a0c8f24f..6bc07e0d2ef2c 100644
--- a/pkg/logs/tailers/journald/tailer.go
+++ b/pkg/logs/tailers/journald/tailer.go
@@ -16,7 +16,7 @@ import (
"github.com/coreos/go-systemd/sdjournal"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
"github.com/DataDog/datadog-agent/pkg/logs/internal/decoder"
"github.com/DataDog/datadog-agent/pkg/logs/internal/framer"
diff --git a/pkg/logs/tailers/journald/tailer_test.go b/pkg/logs/tailers/journald/tailer_test.go
index ac44f5f265cb0..37d9376dde76e 100644
--- a/pkg/logs/tailers/journald/tailer_test.go
+++ b/pkg/logs/tailers/journald/tailer_test.go
@@ -16,7 +16,7 @@ import (
"github.com/coreos/go-systemd/sdjournal"
"github.com/stretchr/testify/assert"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
"github.com/DataDog/datadog-agent/pkg/logs/message"
@@ -117,7 +117,7 @@ func (m *MockJournal) GetCursor() (string, error) {
func TestIdentifier(t *testing.T) {
var tailer *Tailer
var source *sources.LogSource
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
// expect default identifier
source = sources.NewLogSource("", &config.LogsConfig{})
@@ -136,7 +136,7 @@ func TestShouldDropEntry(t *testing.T) {
var source *sources.LogSource
var tailer *Tailer
var err error
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
// expect only the specified service units or matching entries to be dropped
source = sources.NewLogSource("", &config.LogsConfig{ExcludeSystemUnits: []string{"foo", "bar"}, ExcludeUserUnits: []string{"baz", "qux"}, ExcludeMatches: []string{"quux=quuz"}})
@@ -286,7 +286,7 @@ func TestShouldDropEntry(t *testing.T) {
func TestApplicationName(t *testing.T) {
source := sources.NewLogSource("", &config.LogsConfig{})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailer := NewTailer(source, nil, nil, true, fakeTagger)
assert.Equal(t, "foo", tailer.getApplicationName(
@@ -331,7 +331,7 @@ func TestApplicationName(t *testing.T) {
func TestContent(t *testing.T) {
source := sources.NewLogSource("", &config.LogsConfig{})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailer := NewTailer(source, nil, nil, true, fakeTagger)
_, marshaled := tailer.getContent(
@@ -363,7 +363,7 @@ func TestContent(t *testing.T) {
func TestSeverity(t *testing.T) {
source := sources.NewLogSource("", &config.LogsConfig{})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailer := NewTailer(source, nil, nil, true, fakeTagger)
priorityValues := []string{"0", "1", "2", "3", "4", "5", "6", "7", "foo"}
@@ -381,7 +381,7 @@ func TestSeverity(t *testing.T) {
func TestApplicationNameShouldBeDockerForContainerEntries(t *testing.T) {
source := sources.NewLogSource("", &config.LogsConfig{})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailer := NewTailer(source, nil, nil, true, fakeTagger)
assert.Equal(t, "docker", tailer.getApplicationName(
@@ -400,7 +400,7 @@ func TestApplicationNameShouldBeShortImageForContainerEntries(t *testing.T) {
containerID := "bar"
source := sources.NewLogSource("", &config.LogsConfig{ContainerMode: true})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailer := NewTailer(source, nil, nil, true, fakeTagger)
assert.Equal(t, "testImage", tailer.getApplicationName(
@@ -423,7 +423,7 @@ func TestApplicationNameShouldBeDockerWhenTagNotFound(t *testing.T) {
containerID := "bar2"
source := sources.NewLogSource("", &config.LogsConfig{ContainerMode: true})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailer := NewTailer(source, nil, nil, true, fakeTagger)
assert.Equal(t, "docker", tailer.getApplicationName(
@@ -449,7 +449,7 @@ func TestWrongTypeFromCache(t *testing.T) {
cache.Cache.Set(getImageCacheKey(containerID), 10, 30*time.Second)
source := sources.NewLogSource("", &config.LogsConfig{ContainerMode: true})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailer := NewTailer(source, nil, nil, true, fakeTagger)
assert.Equal(t, "testImage", tailer.getApplicationName(
@@ -493,7 +493,7 @@ func TestTailingMode(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
mockJournal := &MockJournal{m: m}
source := sources.NewLogSource("", tt.config)
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailer := NewTailer(source, nil, mockJournal, true, fakeTagger)
tailer.Start(tt.cursor)
@@ -518,7 +518,7 @@ func TestTailerCanTailJournal(t *testing.T) {
mockJournal := &MockJournal{m: &sync.Mutex{}}
source := sources.NewLogSource("", &config.LogsConfig{})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailer := NewTailer(source, make(chan *message.Message, 1), mockJournal, true, fakeTagger)
mockJournal.entries = append(mockJournal.entries, &sdjournal.JournalEntry{Fields: map[string]string{"MESSAGE": "foobar"}})
@@ -539,7 +539,7 @@ func TestTailerWithStructuredMessage(t *testing.T) {
mockJournal := &MockJournal{m: &sync.Mutex{}}
source := sources.NewLogSource("", &config.LogsConfig{})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailer := NewTailer(source, make(chan *message.Message, 1), mockJournal, false, fakeTagger)
mockJournal.entries = append(mockJournal.entries, &sdjournal.JournalEntry{Fields: map[string]string{
sdjournal.SD_JOURNAL_FIELD_MESSAGE: "foobar",
@@ -564,7 +564,7 @@ func TestTailerCompareUnstructuredAndStructured(t *testing.T) {
mockJournalV1 := &MockJournal{m: &sync.Mutex{}}
sourceV1 := sources.NewLogSource("", &config.LogsConfig{})
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
tailerV1 := NewTailer(sourceV1, make(chan *message.Message, 1), mockJournalV1, true, fakeTagger)
mockJournalV1.entries = append(mockJournalV1.entries, &sdjournal.JournalEntry{Fields: map[string]string{
sdjournal.SD_JOURNAL_FIELD_MESSAGE: "journald log message content",
@@ -604,7 +604,7 @@ func TestExpectedTagDuration(t *testing.T) {
mockConfig := configmock.New(t)
tags := []string{"tag1:value1"}
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := mock.SetupFakeTagger(t)
mockConfig.SetWithoutSource("tags", tags)
defer mockConfig.SetWithoutSource("tags", nil)
diff --git a/pkg/metrics/metricsource.go b/pkg/metrics/metricsource.go
index 9868f5925d086..1db0eaa342124 100644
--- a/pkg/metrics/metricsource.go
+++ b/pkg/metrics/metricsource.go
@@ -297,6 +297,7 @@ const (
MetricSourceKubeflow
MetricSourceAppgateSDP
MetricSourceAnyscale
+ MetricSourceMilvus
)
// String returns a string representation of MetricSource
@@ -852,6 +853,8 @@ func (ms MetricSource) String() string {
return "zenoh_router"
case MetricSourceAwsNeuron:
return "aws_neuron"
+ case MetricSourceMilvus:
+ return "milvus"
default:
return ""
}
@@ -1384,6 +1387,8 @@ func CheckNameToMetricSource(name string) MetricSource {
return MetricSourceSlurm
case "tibco_ems":
return MetricSourceTibcoEMS
+ case "milvus":
+ return MetricSourceMilvus
default:
return MetricSourceUnknown
}
diff --git a/pkg/network/ebpf/c/protocols/http2/decoding.h b/pkg/network/ebpf/c/protocols/http2/decoding.h
index 81d1f36af3d9c..d214b46a1b503 100644
--- a/pkg/network/ebpf/c/protocols/http2/decoding.h
+++ b/pkg/network/ebpf/c/protocols/http2/decoding.h
@@ -101,16 +101,17 @@ static __always_inline void pktbuf_skip_preface(pktbuf_t pkt) {
// Returns the telemetry pointer from the relevant map.
static __always_inline void* get_telemetry(pktbuf_t pkt) {
- const __u32 zero = 0;
+ const __u32 plaintext_key = 0;
+ const __u32 tls_key = 1;
pktbuf_map_lookup_option_t map_lookup_telemetry_array[] = {
[PKTBUF_SKB] = {
.map = &http2_telemetry,
- .key = (void*)&zero,
+ .key = (void*)&plaintext_key,
},
[PKTBUF_TLS] = {
- .map = &tls_http2_telemetry,
- .key = (void*)&zero,
+ .map = &http2_telemetry,
+ .key = (void*)&tls_key,
},
};
return pktbuf_map_lookup(pkt, map_lookup_telemetry_array);
diff --git a/pkg/network/ebpf/c/protocols/http2/maps-defs.h b/pkg/network/ebpf/c/protocols/http2/maps-defs.h
index ddeff6742c6e5..949afce785df7 100644
--- a/pkg/network/ebpf/c/protocols/http2/maps-defs.h
+++ b/pkg/network/ebpf/c/protocols/http2/maps-defs.h
@@ -43,11 +43,8 @@ BPF_PERCPU_ARRAY_MAP(http2_scratch_buffer, http2_event_t, 1)
/* Allocating a ctx on the heap, in order to save the ctx between the current stream. */
BPF_PERCPU_ARRAY_MAP(http2_ctx_heap, http2_ctx_t, 1)
-/* This map is used for telemetry in kernelspace
- * only key 0 is used
- * value is a http2 telemetry object
- */
-BPF_ARRAY_MAP(http2_telemetry, http2_telemetry_t, 1)
-BPF_ARRAY_MAP(tls_http2_telemetry, http2_telemetry_t, 1)
+// This map is used to gather telemetry data from the eBPF programs. Key 0 is used for plaintext traffic,
+// and key 1 is used for encrypted traffic.
+BPF_ARRAY_MAP(http2_telemetry, http2_telemetry_t, 2)
#endif
diff --git a/pkg/network/go/asmscan/scan.go b/pkg/network/go/asmscan/scan.go
index 94ae7ad09919d..5c55ba13905a8 100644
--- a/pkg/network/go/asmscan/scan.go
+++ b/pkg/network/go/asmscan/scan.go
@@ -7,11 +7,13 @@
package asmscan
import (
- "debug/elf"
+ "errors"
"fmt"
"golang.org/x/arch/arm64/arm64asm"
"golang.org/x/arch/x86/x86asm"
+
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
// ScanFunction finds the program counter (PC) positions of machine code instructions
@@ -37,7 +39,11 @@ import (
// - https://github.com/iovisor/bcc/issues/1320#issuecomment-407927542
// (which describes how this approach works as a workaround)
// - https://github.com/golang/go/issues/22008
-func ScanFunction(textSection *elf.Section, sym elf.Symbol, functionOffset uint64, scanInstructions func(data []byte) ([]uint64, error)) ([]uint64, error) {
+func ScanFunction(textSection *safeelf.Section, sym safeelf.Symbol, functionOffset uint64, scanInstructions func(data []byte) ([]uint64, error)) ([]uint64, error) {
+ if textSection.ReaderAt == nil {
+ return nil, errors.New("text section is not available in random-access form")
+ }
+
// Determine the offset in the section that the function starts at
lowPC := sym.Value
highPC := lowPC + sym.Size
diff --git a/pkg/network/go/bininspect/dwarf.go b/pkg/network/go/bininspect/dwarf.go
index 3723606a14754..745e89bfd6c12 100644
--- a/pkg/network/go/bininspect/dwarf.go
+++ b/pkg/network/go/bininspect/dwarf.go
@@ -9,11 +9,13 @@ package bininspect
import (
"debug/dwarf"
- "debug/elf"
"errors"
"fmt"
+
"github.com/DataDog/datadog-agent/pkg/network/go/dwarfutils"
"github.com/DataDog/datadog-agent/pkg/network/go/dwarfutils/locexpr"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
+
"github.com/go-delve/delve/pkg/dwarf/godwarf"
"github.com/go-delve/delve/pkg/dwarf/loclist"
)
@@ -28,7 +30,7 @@ type dwarfInspector struct {
// It also returns some additional relevant metadata about the given file.
// It is using the DWARF debug data to obtain information, and therefore should be run on elf files that contain debug
// data, like our test binaries.
-func InspectWithDWARF(elfFile *elf.File, functions []string, structFields []FieldIdentifier) (*Result, error) {
+func InspectWithDWARF(elfFile *safeelf.File, functions []string, structFields []FieldIdentifier) (*Result, error) {
if elfFile == nil {
return nil, ErrNilElf
}
@@ -281,7 +283,7 @@ func (d dwarfInspector) findStructOffsets(structFields []FieldIdentifier) (map[F
// starting at offset, for address pc.
// Adapted from github.com/go-delve/delve/pkg/proc.(*BinaryInfo).loclistEntry
func (d dwarfInspector) getLoclistEntry(offset int64, pc uint64) (*loclist.Entry, error) {
- debugInfoBytes, err := godwarf.GetDebugSectionElf(d.elf.file, "info")
+ debugInfoBytes, err := godwarf.GetDebugSectionElf(d.elf.file.File, "info")
if err != nil {
return nil, err
}
@@ -291,11 +293,11 @@ func (d dwarfInspector) getLoclistEntry(offset int64, pc uint64) (*loclist.Entry
return nil, err
}
- debugLocBytes, _ := godwarf.GetDebugSectionElf(d.elf.file, "loc")
+ debugLocBytes, _ := godwarf.GetDebugSectionElf(d.elf.file.File, "loc")
loclist2 := loclist.NewDwarf2Reader(debugLocBytes, int(d.elf.arch.PointerSize()))
- debugLoclistBytes, _ := godwarf.GetDebugSectionElf(d.elf.file, "loclists")
+ debugLoclistBytes, _ := godwarf.GetDebugSectionElf(d.elf.file.File, "loclists")
loclist5 := loclist.NewDwarf5Reader(debugLoclistBytes)
- debugAddrBytes, _ := godwarf.GetDebugSectionElf(d.elf.file, "addr")
+ debugAddrBytes, _ := godwarf.GetDebugSectionElf(d.elf.file.File, "addr")
debugAddrSection := godwarf.ParseAddr(debugAddrBytes)
var base uint64
diff --git a/pkg/network/go/bininspect/newproc.go b/pkg/network/go/bininspect/newproc.go
index 9af8349e854ca..a6400f56525b1 100644
--- a/pkg/network/go/bininspect/newproc.go
+++ b/pkg/network/go/bininspect/newproc.go
@@ -8,23 +8,23 @@
package bininspect
import (
- "debug/elf"
"errors"
"fmt"
"github.com/DataDog/datadog-agent/pkg/network/go/goid"
"github.com/DataDog/datadog-agent/pkg/network/go/goversion"
"github.com/DataDog/datadog-agent/pkg/util/common"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
type newProcessBinaryInspector struct {
elf elfMetadata
- symbols map[string]elf.Symbol
+ symbols map[string]safeelf.Symbol
goVersion goversion.GoVersion
}
// InspectNewProcessBinary process the given elf File, and returns the offsets of the given functions and structs.
-func InspectNewProcessBinary(elfFile *elf.File, functions map[string]FunctionConfiguration, structs map[FieldIdentifier]StructLookupFunction) (*Result, error) {
+func InspectNewProcessBinary(elfFile *safeelf.File, functions map[string]FunctionConfiguration, structs map[FieldIdentifier]StructLookupFunction) (*Result, error) {
if elfFile == nil {
return nil, errors.New("got nil elf file")
}
@@ -152,9 +152,9 @@ func (i *newProcessBinaryInspector) getRuntimeGAddrTLSOffset() (uint64, error) {
// - On ARM64 (but really, any architecture other than i386 and 86x64) the
// offset is calculated using runtime.tls_g and the formula is different.
- var tls *elf.Prog
+ var tls *safeelf.Prog
for _, prog := range i.elf.file.Progs {
- if prog.Type == elf.PT_TLS {
+ if prog.Type == safeelf.PT_TLS {
tls = prog
break
}
diff --git a/pkg/network/go/bininspect/pclntab.go b/pkg/network/go/bininspect/pclntab.go
index 3a898de9de102..98228f470bbdc 100644
--- a/pkg/network/go/bininspect/pclntab.go
+++ b/pkg/network/go/bininspect/pclntab.go
@@ -9,11 +9,12 @@ package bininspect
import (
"bytes"
- "debug/elf"
"encoding/binary"
"errors"
"fmt"
"io"
+
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
const (
@@ -51,12 +52,15 @@ var (
// This is used to lazy read from the pclntab section, as the pclntab is large and we don't want to read it all at once,
// or store it in memory.
type sectionAccess struct {
- section *elf.Section
+ section *safeelf.Section
baseOffset int64
}
// ReadAt reads len(p) bytes from the section starting at the given offset.
func (s *sectionAccess) ReadAt(outBuffer []byte, offset int64) (int, error) {
+ if s.section.ReaderAt == nil {
+ return 0, errors.New("section not available in random-access form")
+ }
return s.section.ReadAt(outBuffer, s.baseOffset+offset)
}
@@ -64,7 +68,7 @@ func (s *sectionAccess) ReadAt(outBuffer []byte, offset int64) (int, error) {
// Similar to LineTable struct in https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L43
type pclntanSymbolParser struct {
// section is the pclntab section.
- section *elf.Section
+ section *safeelf.Section
// symbolFilter is the filter for the symbols.
symbolFilter symbolFilter
@@ -93,7 +97,7 @@ type pclntanSymbolParser struct {
}
// GetPCLNTABSymbolParser returns the matching symbols from the pclntab section.
-func GetPCLNTABSymbolParser(f *elf.File, symbolFilter symbolFilter) (map[string]*elf.Symbol, error) {
+func GetPCLNTABSymbolParser(f *safeelf.File, symbolFilter symbolFilter) (map[string]*safeelf.Symbol, error) {
section := f.Section(pclntabSectionName)
if section == nil {
return nil, ErrMissingPCLNTABSection
@@ -118,6 +122,9 @@ func GetPCLNTABSymbolParser(f *elf.File, symbolFilter symbolFilter) (map[string]
// parsePclntab parses the pclntab, setting the version and verifying the header.
// Based on parsePclnTab in https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L194
func (p *pclntanSymbolParser) parsePclntab() error {
+ if p.section.ReaderAt == nil {
+ return errors.New("section not available in random-access form")
+ }
p.cachedVersion = ver11
pclntabHeader := make([]byte, 8)
@@ -219,9 +226,9 @@ func getFuncTableFieldSize(version version, ptrSize int) int {
// getSymbols returns the symbols from the pclntab section that match the symbol filter.
// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L300-L329
-func (p *pclntanSymbolParser) getSymbols() (map[string]*elf.Symbol, error) {
+func (p *pclntanSymbolParser) getSymbols() (map[string]*safeelf.Symbol, error) {
numWanted := p.symbolFilter.getNumWanted()
- symbols := make(map[string]*elf.Symbol, numWanted)
+ symbols := make(map[string]*safeelf.Symbol, numWanted)
data := sectionAccess{section: p.section}
for currentIdx := uint32(0); currentIdx < p.funcTableSize; currentIdx++ {
// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L315
@@ -237,7 +244,7 @@ func (p *pclntanSymbolParser) getSymbols() (map[string]*elf.Symbol, error) {
if funcName == "" {
continue
}
- symbols[funcName] = &elf.Symbol{
+ symbols[funcName] = &safeelf.Symbol{
Name: funcName,
}
if len(symbols) == numWanted {
diff --git a/pkg/network/go/bininspect/pclntab_test.go b/pkg/network/go/bininspect/pclntab_test.go
index ce47439733ef4..a37edc9908799 100644
--- a/pkg/network/go/bininspect/pclntab_test.go
+++ b/pkg/network/go/bininspect/pclntab_test.go
@@ -8,33 +8,35 @@
package bininspect
import (
- "debug/elf"
- "github.com/DataDog/datadog-agent/pkg/util/common"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
"os"
"strconv"
"strings"
"testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/DataDog/datadog-agent/pkg/util/common"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
const (
// Info is composed of the type and binding of the symbol. Type is the lower 4 bits and binding is the upper 4 bits.
// We are only interested in functions, which binding STB_GLOBAL (1) and type STT_FUNC (2).
// Hence, we are interested in symbols with Info 18.
- infoFunction = byte(elf.STB_GLOBAL)<<4 | byte(elf.STT_FUNC)
+ infoFunction = byte(safeelf.STB_GLOBAL)<<4 | byte(safeelf.STT_FUNC)
)
// TestGetPCLNTABSymbolParser tests the GetPCLNTABSymbolParser function with strings set symbol filter.
// We are looking to find all symbols of the current process executable and check if they are found in the PCLNTAB.
func TestGetPCLNTABSymbolParser(t *testing.T) {
currentPid := os.Getpid()
- f, err := elf.Open("/proc/" + strconv.Itoa(currentPid) + "/exe")
+ f, err := safeelf.Open("/proc/" + strconv.Itoa(currentPid) + "/exe")
require.NoError(t, err)
symbolSet := make(common.StringSet)
staticSymbols, _ := f.Symbols()
dynamicSymbols, _ := f.DynamicSymbols()
- for _, symbols := range [][]elf.Symbol{staticSymbols, dynamicSymbols} {
+ for _, symbols := range [][]safeelf.Symbol{staticSymbols, dynamicSymbols} {
for _, sym := range symbols {
if sym.Info != infoFunction {
continue
diff --git a/pkg/network/go/bininspect/symbol_filter.go b/pkg/network/go/bininspect/symbol_filter.go
index c0a296b43dd86..d96d6fe6bf703 100644
--- a/pkg/network/go/bininspect/symbol_filter.go
+++ b/pkg/network/go/bininspect/symbol_filter.go
@@ -8,10 +8,10 @@
package bininspect
import (
- "debug/elf"
"strings"
"github.com/DataDog/datadog-agent/pkg/util/common"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
// symbolFilter is an interface for filtering symbols read from ELF files.
@@ -24,7 +24,7 @@ type symbolFilter interface {
want(symbol string) bool
// findMissing returns the list of symbol names which the filter wanted but were not found in the
// symbol map. This is only used for error messages.
- findMissing(map[string]elf.Symbol) []string
+ findMissing(map[string]safeelf.Symbol) []string
}
// stringSetSymbolFilter is a symbol filter which finds all the symbols in a
@@ -58,7 +58,7 @@ func (f stringSetSymbolFilter) want(symbol string) bool {
}
// findMissing gets the list of symbols which were missing. Only used for error prints.
-func (f stringSetSymbolFilter) findMissing(symbolByName map[string]elf.Symbol) []string {
+func (f stringSetSymbolFilter) findMissing(symbolByName map[string]safeelf.Symbol) []string {
missingSymbols := make([]string, 0, max(0, len(f.symbolSet)-len(symbolByName)))
for symbolName := range f.symbolSet {
if _, ok := symbolByName[symbolName]; !ok {
@@ -97,6 +97,6 @@ func (f prefixSymbolFilter) want(symbol string) bool {
// findMissing gets the list of symbols which were missing. Only used for error
// prints. Since we only know we were looking for a prefix, return that.
-func (f prefixSymbolFilter) findMissing(_ map[string]elf.Symbol) []string {
+func (f prefixSymbolFilter) findMissing(_ map[string]safeelf.Symbol) []string {
return []string{f.prefix}
}
diff --git a/pkg/network/go/bininspect/symbols.go b/pkg/network/go/bininspect/symbols.go
index 910bc37d3ff30..32af710f74046 100644
--- a/pkg/network/go/bininspect/symbols.go
+++ b/pkg/network/go/bininspect/symbols.go
@@ -8,7 +8,6 @@
package bininspect
import (
- "debug/elf"
"encoding/binary"
"errors"
"fmt"
@@ -19,6 +18,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/util/common"
"github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
const (
@@ -32,6 +32,9 @@ const (
// redundant allocations. We get it as a parameter and not putting it as a global, to be thread safe among concurrent
// and parallel calls.
func getSymbolNameByEntry(sectionReader io.ReaderAt, startPos, minLength int, preAllocatedBuf []byte) int {
+ if sectionReader == nil {
+ return -1
+ }
readBytes, err := sectionReader.ReadAt(preAllocatedBuf, int64(startPos))
if err != nil && err != io.EOF {
return -1
@@ -89,35 +92,35 @@ func getSymbolLengthBoundaries(set common.StringSet) (int, int) {
// fillSymbol reads the symbol entry from the symbol section with the first 4 bytes of the name entry (which
// we read using readSymbolEntryInStringTable).
-func fillSymbol(symbol *elf.Symbol, byteOrder binary.ByteOrder, symbolName string, allocatedBufferForRead []byte, is64Bit bool) {
+func fillSymbol(symbol *safeelf.Symbol, byteOrder binary.ByteOrder, symbolName string, allocatedBufferForRead []byte, is64Bit bool) {
symbol.Name = symbolName
if is64Bit {
infoAndOther := byteOrder.Uint16(allocatedBufferForRead[0:2])
symbol.Info = uint8(infoAndOther >> 8)
symbol.Other = uint8(infoAndOther)
- symbol.Section = elf.SectionIndex(byteOrder.Uint16(allocatedBufferForRead[2:4]))
+ symbol.Section = safeelf.SectionIndex(byteOrder.Uint16(allocatedBufferForRead[2:4]))
symbol.Value = byteOrder.Uint64(allocatedBufferForRead[4:12])
symbol.Size = byteOrder.Uint64(allocatedBufferForRead[12:20])
} else {
infoAndOther := byteOrder.Uint16(allocatedBufferForRead[8:10])
symbol.Info = uint8(infoAndOther >> 8)
symbol.Other = uint8(infoAndOther)
- symbol.Section = elf.SectionIndex(byteOrder.Uint16(allocatedBufferForRead[10:12]))
+ symbol.Section = safeelf.SectionIndex(byteOrder.Uint16(allocatedBufferForRead[10:12]))
symbol.Value = uint64(byteOrder.Uint32(allocatedBufferForRead[0:4]))
symbol.Size = uint64(byteOrder.Uint32(allocatedBufferForRead[4:8]))
}
}
// getSymbolsUnified extracts the given symbol list from the binary.
-func getSymbolsUnified(f *elf.File, typ elf.SectionType, filter symbolFilter, is64Bit bool) ([]elf.Symbol, error) {
- symbolSize := elf.Sym32Size
+func getSymbolsUnified(f *safeelf.File, typ safeelf.SectionType, filter symbolFilter, is64Bit bool) ([]safeelf.Symbol, error) {
+ symbolSize := safeelf.Sym32Size
if is64Bit {
- symbolSize = elf.Sym64Size
+ symbolSize = safeelf.Sym64Size
}
// Getting the relevant symbol section.
symbolSection := f.SectionByType(typ)
if symbolSection == nil {
- return nil, elf.ErrNoSymbols
+ return nil, safeelf.ErrNoSymbols
}
// Checking the symbol section size is aligned to a multiplication of symbolSize.
@@ -129,11 +132,14 @@ func getSymbolsUnified(f *elf.File, typ elf.SectionType, filter symbolFilter, is
if symbolSection.Link <= 0 || symbolSection.Link >= uint32(len(f.Sections)) {
return nil, errors.New("section has invalid string table link")
}
+ if symbolSection.ReaderAt == nil {
+ return nil, errors.New("symbol section not available in random-access form")
+ }
numWanted := filter.getNumWanted()
// Allocating entries for all wanted symbols.
- symbols := make([]elf.Symbol, 0, numWanted)
+ symbols := make([]safeelf.Symbol, 0, numWanted)
// Extracting the min and max symbol length.
minSymbolNameSize, maxSymbolNameSize := filter.getMinMaxLength()
// Pre-allocating a buffer to read the symbol string into.
@@ -183,7 +189,7 @@ func getSymbolsUnified(f *elf.File, typ elf.SectionType, filter symbolFilter, is
continue
}
- var symbol elf.Symbol
+ var symbol safeelf.Symbol
// Complete the symbol reading.
// The symbol is composed of 4 bytes representing the symbol name in the string table, and rest is the fields
// of the symbols. So here we skip the first 4 bytes of the symbol, as we already processed it.
@@ -199,12 +205,12 @@ func getSymbolsUnified(f *elf.File, typ elf.SectionType, filter symbolFilter, is
return symbols, nil
}
-func getSymbols(f *elf.File, typ elf.SectionType, filter symbolFilter) ([]elf.Symbol, error) {
+func getSymbols(f *safeelf.File, typ safeelf.SectionType, filter symbolFilter) ([]safeelf.Symbol, error) {
switch f.Class {
- case elf.ELFCLASS64:
+ case safeelf.ELFCLASS64:
return getSymbolsUnified(f, typ, filter, true)
- case elf.ELFCLASS32:
+ case safeelf.ELFCLASS32:
return getSymbolsUnified(f, typ, filter, false)
}
@@ -214,31 +220,31 @@ func getSymbols(f *elf.File, typ elf.SectionType, filter symbolFilter) ([]elf.Sy
// GetAllSymbolsByName returns all filtered symbols in the given elf file,
// mapped by the symbol names. In case of a missing symbol, an error is
// returned.
-func GetAllSymbolsByName(elfFile *elf.File, filter symbolFilter) (map[string]elf.Symbol, error) {
- regularSymbols, regularSymbolsErr := getSymbols(elfFile, elf.SHT_SYMTAB, filter)
+func GetAllSymbolsByName(elfFile *safeelf.File, filter symbolFilter) (map[string]safeelf.Symbol, error) {
+ regularSymbols, regularSymbolsErr := getSymbols(elfFile, safeelf.SHT_SYMTAB, filter)
if regularSymbolsErr != nil && log.ShouldLog(seelog.TraceLvl) {
log.Tracef("Failed getting regular symbols of elf file: %s", regularSymbolsErr)
}
- var dynamicSymbols []elf.Symbol
+ var dynamicSymbols []safeelf.Symbol
var dynamicSymbolsErr error
numWanted := filter.getNumWanted()
if len(regularSymbols) != numWanted {
- dynamicSymbols, dynamicSymbolsErr = getSymbols(elfFile, elf.SHT_DYNSYM, filter)
+ dynamicSymbols, dynamicSymbolsErr = getSymbols(elfFile, safeelf.SHT_DYNSYM, filter)
if dynamicSymbolsErr != nil && log.ShouldLog(seelog.TraceLvl) {
log.Tracef("Failed getting dynamic symbols of elf file: %s", dynamicSymbolsErr)
}
}
// Only if we failed getting both regular and dynamic symbols - then we abort.
- if regularSymbolsErr == elf.ErrNoSymbols && dynamicSymbolsErr == elf.ErrNoSymbols {
- return nil, elf.ErrNoSymbols
+ if regularSymbolsErr == safeelf.ErrNoSymbols && dynamicSymbolsErr == safeelf.ErrNoSymbols {
+ return nil, safeelf.ErrNoSymbols
}
if regularSymbolsErr != nil && dynamicSymbolsErr != nil {
return nil, fmt.Errorf("could not open symbol sections to resolve symbol offset: %v, %v", regularSymbolsErr, dynamicSymbolsErr)
}
- symbolByName := make(map[string]elf.Symbol, len(regularSymbols)+len(dynamicSymbols))
+ symbolByName := make(map[string]safeelf.Symbol, len(regularSymbols)+len(dynamicSymbols))
for _, regularSymbol := range regularSymbols {
symbolByName[regularSymbol.Name] = regularSymbol
@@ -259,14 +265,14 @@ func GetAllSymbolsByName(elfFile *elf.File, filter symbolFilter) (map[string]elf
// GetAllSymbolsInSetByName returns all symbols (from the symbolSet) in the
// given elf file, mapped by the symbol names. In case of a missing symbol, an
// error is returned.
-func GetAllSymbolsInSetByName(elfFile *elf.File, symbolSet common.StringSet) (map[string]elf.Symbol, error) {
+func GetAllSymbolsInSetByName(elfFile *safeelf.File, symbolSet common.StringSet) (map[string]safeelf.Symbol, error) {
filter := newStringSetSymbolFilter(symbolSet)
return GetAllSymbolsByName(elfFile, filter)
}
// GetAnySymbolWithPrefix returns any one symbol with the given prefix and the
// specified maximum length from the ELF file.
-func GetAnySymbolWithPrefix(elfFile *elf.File, prefix string, maxLength int) (*elf.Symbol, error) {
+func GetAnySymbolWithPrefix(elfFile *safeelf.File, prefix string, maxLength int) (*safeelf.Symbol, error) {
filter := newPrefixSymbolFilter(prefix, maxLength)
symbols, err := GetAllSymbolsByName(elfFile, filter)
if err != nil {
@@ -284,7 +290,7 @@ func GetAnySymbolWithPrefix(elfFile *elf.File, prefix string, maxLength int) (*e
// GetAnySymbolWithPrefixPCLNTAB returns any one symbol with the given prefix and the
// specified maximum length from the pclntab section in ELF file.
-func GetAnySymbolWithPrefixPCLNTAB(elfFile *elf.File, prefix string, maxLength int) (*elf.Symbol, error) {
+func GetAnySymbolWithPrefixPCLNTAB(elfFile *safeelf.File, prefix string, maxLength int) (*safeelf.Symbol, error) {
symbols, err := GetPCLNTABSymbolParser(elfFile, newPrefixSymbolFilter(prefix, maxLength))
if err != nil {
return nil, err
diff --git a/pkg/network/go/bininspect/symbols_test.go b/pkg/network/go/bininspect/symbols_test.go
index 8d2cd3ce8aae2..3926e11c98e1a 100644
--- a/pkg/network/go/bininspect/symbols_test.go
+++ b/pkg/network/go/bininspect/symbols_test.go
@@ -8,7 +8,6 @@
package bininspect
import (
- "debug/elf"
"path/filepath"
"testing"
@@ -17,9 +16,10 @@ import (
"github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil"
"github.com/DataDog/datadog-agent/pkg/util/common"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
-func openTestElf(t *testing.T) *elf.File {
+func openTestElf(t *testing.T) *safeelf.File {
curDir, err := testutil.CurDir()
require.NoError(t, err)
@@ -27,7 +27,7 @@ func openTestElf(t *testing.T) *elf.File {
// always.
lib := filepath.Join(curDir, "..", "..", "usm", "testdata",
"site-packages", "ddtrace", "libssl.so.arm64")
- elfFile, err := elf.Open(lib)
+ elfFile, err := safeelf.Open(lib)
require.NoError(t, err)
return elfFile
diff --git a/pkg/network/go/bininspect/types.go b/pkg/network/go/bininspect/types.go
index 309c06377e2f5..c83be49b4e410 100644
--- a/pkg/network/go/bininspect/types.go
+++ b/pkg/network/go/bininspect/types.go
@@ -9,12 +9,13 @@
package bininspect
import (
- "debug/elf"
"errors"
"reflect"
- "github.com/DataDog/datadog-agent/pkg/network/go/goversion"
delve "github.com/go-delve/delve/pkg/goversion"
+
+ "github.com/DataDog/datadog-agent/pkg/network/go/goversion"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
const (
@@ -63,7 +64,7 @@ var StructOffsetLimitListenerConnNetConn = FieldIdentifier{
}
type elfMetadata struct {
- file *elf.File
+ file *safeelf.File
arch GoArch
}
diff --git a/pkg/network/go/bininspect/utils.go b/pkg/network/go/bininspect/utils.go
index 0f331c8d7265a..83aafa6cfedc4 100644
--- a/pkg/network/go/bininspect/utils.go
+++ b/pkg/network/go/bininspect/utils.go
@@ -9,24 +9,25 @@ package bininspect
import (
"debug/dwarf"
- "debug/elf"
"errors"
"fmt"
"strings"
+ "github.com/go-delve/delve/pkg/goversion"
+
"github.com/DataDog/datadog-agent/pkg/network/go/asmscan"
"github.com/DataDog/datadog-agent/pkg/network/go/binversion"
ddversion "github.com/DataDog/datadog-agent/pkg/network/go/goversion"
- "github.com/go-delve/delve/pkg/goversion"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
// GetArchitecture returns the `runtime.GOARCH`-compatible names of the architecture.
// Only returns a value for supported architectures.
-func GetArchitecture(elfFile *elf.File) (GoArch, error) {
+func GetArchitecture(elfFile *safeelf.File) (GoArch, error) {
switch elfFile.FileHeader.Machine {
- case elf.EM_X86_64:
+ case safeelf.EM_X86_64:
return GoArchX86_64, nil
- case elf.EM_AARCH64:
+ case safeelf.EM_AARCH64:
return GoArchARM64, nil
}
@@ -36,7 +37,7 @@ func GetArchitecture(elfFile *elf.File) (GoArch, error) {
// HasDwarfInfo attempts to parse the DWARF data and look for any records.
// If it cannot be parsed or if there are no DWARF info records,
// then it assumes that the binary has been stripped.
-func HasDwarfInfo(elfFile *elf.File) (*dwarf.Data, bool) {
+func HasDwarfInfo(elfFile *safeelf.File) (*dwarf.Data, bool) {
dwarfData, err := elfFile.DWARF()
if err != nil {
return nil, false
@@ -55,7 +56,7 @@ func HasDwarfInfo(elfFile *elf.File) (*dwarf.Data, bool) {
// The implementation is available in src/cmd/go/internal/version/version.go:
// https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/cmd/go/internal/version/version.go
// The main logic was pulled out to a sub-package, `binversion`
-func FindGoVersion(elfFile *elf.File) (ddversion.GoVersion, error) {
+func FindGoVersion(elfFile *safeelf.File) (ddversion.GoVersion, error) {
version, err := binversion.ReadElfBuildInfo(elfFile)
if err != nil {
return ddversion.GoVersion{}, fmt.Errorf("could not get Go toolchain version from ELF binary file: %w", err)
@@ -90,7 +91,7 @@ func FindABI(version ddversion.GoVersion, arch GoArch) (GoABI, error) {
// - https://github.com/go-delve/delve/pull/2704/files#diff-fb7b7a020e32bf8bf477c052ac2d2857e7e587478be6039aebc7135c658417b2R769
// - https://github.com/go-delve/delve/blob/75bbbbb60cecda0d65c63de7ae8cb8b8412d6fc3/pkg/proc/breakpoints.go#L86-L95
// - https://github.com/go-delve/delve/blob/75bbbbb60cecda0d65c63de7ae8cb8b8412d6fc3/pkg/proc/breakpoints.go#L374
-func FindReturnLocations(elfFile *elf.File, sym elf.Symbol, functionOffset uint64) ([]uint64, error) {
+func FindReturnLocations(elfFile *safeelf.File, sym safeelf.Symbol, functionOffset uint64) ([]uint64, error) {
arch, err := GetArchitecture(elfFile)
if err != nil {
return nil, err
@@ -112,15 +113,15 @@ func FindReturnLocations(elfFile *elf.File, sym elf.Symbol, functionOffset uint6
}
// SymbolToOffset returns the offset of the given symbol name in the given elf file.
-func SymbolToOffset(f *elf.File, symbol elf.Symbol) (uint32, error) {
+func SymbolToOffset(f *safeelf.File, symbol safeelf.Symbol) (uint32, error) {
if f == nil {
return 0, errors.New("got nil elf file")
}
- var sectionsToSearchForSymbol []*elf.Section
+ var sectionsToSearchForSymbol []*safeelf.Section
for i := range f.Sections {
- if f.Sections[i].Flags == elf.SHF_ALLOC+elf.SHF_EXECINSTR {
+ if f.Sections[i].Flags == safeelf.SHF_ALLOC+safeelf.SHF_EXECINSTR {
sectionsToSearchForSymbol = append(sectionsToSearchForSymbol, f.Sections[i])
}
}
@@ -129,7 +130,7 @@ func SymbolToOffset(f *elf.File, symbol elf.Symbol) (uint32, error) {
return 0, fmt.Errorf("symbol %q not found in file - no sections to search", symbol)
}
- var executableSection *elf.Section
+ var executableSection *safeelf.Section
// Find what section the symbol is in by checking the executable section's
// addr space.
diff --git a/pkg/network/go/binversion/buildinfo.go b/pkg/network/go/binversion/buildinfo.go
index 8914693bd62e1..e95fd862ffec5 100644
--- a/pkg/network/go/binversion/buildinfo.go
+++ b/pkg/network/go/binversion/buildinfo.go
@@ -29,11 +29,11 @@ package binversion
import (
"bytes"
- "debug/elf"
"encoding/binary"
"errors"
"io"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
ddsync "github.com/DataDog/datadog-agent/pkg/util/sync"
)
@@ -77,7 +77,7 @@ type exe interface {
// ReadElfBuildInfo extracts the Go toolchain version and module information
// strings from a Go binary. On success, vers should be non-empty. mod
// is empty if the binary was not built with modules enabled.
-func ReadElfBuildInfo(elfFile *elf.File) (vers string, err error) {
+func ReadElfBuildInfo(elfFile *safeelf.File) (vers string, err error) {
x := &elfExe{f: elfFile}
// Read the first 64kB of dataAddr to find the build info blob.
@@ -176,7 +176,7 @@ func readString(x exe, ptrSize int, readPtr func([]byte) uint64, addr uint64) st
// elfExe is the ELF implementation of the exe interface.
type elfExe struct {
- f *elf.File
+ f *safeelf.File
}
func (x *elfExe) ReadData(addr, size uint64) ([]byte, error) {
@@ -225,7 +225,7 @@ func (x *elfExe) DataStart() uint64 {
}
}
for _, p := range x.f.Progs {
- if p.Type == elf.PT_LOAD && p.Flags&(elf.PF_X|elf.PF_W) == elf.PF_W {
+ if p.Type == safeelf.PT_LOAD && p.Flags&(safeelf.PF_X|safeelf.PF_W) == safeelf.PF_W {
return p.Vaddr
}
}
diff --git a/pkg/network/go/goid/internal/generate_goid_lut.go b/pkg/network/go/goid/internal/generate_goid_lut.go
index d21533c7f0566..0d7145d03d411 100644
--- a/pkg/network/go/goid/internal/generate_goid_lut.go
+++ b/pkg/network/go/goid/internal/generate_goid_lut.go
@@ -9,7 +9,6 @@ package main
import (
"context"
- "debug/elf"
"flag"
"fmt"
"log"
@@ -21,6 +20,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/network/go/dwarfutils"
"github.com/DataDog/datadog-agent/pkg/network/go/goversion"
"github.com/DataDog/datadog-agent/pkg/network/go/lutgen"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
var (
@@ -134,7 +134,7 @@ func inspectBinary(binary lutgen.Binary) (interface{}, error) {
return 0, err
}
defer file.Close()
- elfFile, err := elf.NewFile(file)
+ elfFile, err := safeelf.NewFile(file)
if err != nil {
return 0, err
}
diff --git a/pkg/network/protocols/http/gotls/lookup/internal/generate_luts.go b/pkg/network/protocols/http/gotls/lookup/internal/generate_luts.go
index b2596f9de3e49..bfe620de890b6 100644
--- a/pkg/network/protocols/http/gotls/lookup/internal/generate_luts.go
+++ b/pkg/network/protocols/http/gotls/lookup/internal/generate_luts.go
@@ -9,7 +9,6 @@ package main
import (
"context"
- "debug/elf"
_ "embed"
"flag"
"fmt"
@@ -23,6 +22,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/network/go/bininspect"
"github.com/DataDog/datadog-agent/pkg/network/go/goversion"
"github.com/DataDog/datadog-agent/pkg/network/go/lutgen"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
var (
@@ -255,7 +255,7 @@ func inspectBinary(binary lutgen.Binary) (interface{}, error) {
}
defer file.Close()
- elfFile, err := elf.NewFile(file)
+ elfFile, err := safeelf.NewFile(file)
if err != nil {
return bininspect.Result{}, err
}
diff --git a/pkg/network/protocols/http/protocol.go b/pkg/network/protocols/http/protocol.go
index 3226f6ce8791c..0c1c4e8b2bc06 100644
--- a/pkg/network/protocols/http/protocol.go
+++ b/pkg/network/protocols/http/protocol.go
@@ -195,7 +195,7 @@ func (p *protocol) setupMapCleaner(mgr *manager.Manager) {
log.Errorf("error getting http_in_flight map: %s", err)
return
}
- mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, EbpfTx](httpMap, 1024)
+ mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, EbpfTx](httpMap, 1024, inFlightMap, "usm_monitor")
if err != nil {
log.Errorf("error creating map cleaner: %s", err)
return
diff --git a/pkg/network/protocols/http2/dynamic_table.go b/pkg/network/protocols/http2/dynamic_table.go
index 4c1ef6e524835..1a20ae06603c7 100644
--- a/pkg/network/protocols/http2/dynamic_table.go
+++ b/pkg/network/protocols/http2/dynamic_table.go
@@ -86,7 +86,7 @@ func (dt *DynamicTable) setupDynamicTableMapCleaner(mgr *manager.Manager, cfg *c
return fmt.Errorf("error getting http2 dynamic table map: %w", err)
}
- mapCleaner, err := ddebpf.NewMapCleaner[HTTP2DynamicTableIndex, HTTP2DynamicTableEntry](dynamicTableMap, defaultMapCleanerBatchSize)
+ mapCleaner, err := ddebpf.NewMapCleaner[HTTP2DynamicTableIndex, HTTP2DynamicTableEntry](dynamicTableMap, defaultMapCleanerBatchSize, dynamicTable, "usm_monitor")
if err != nil {
return fmt.Errorf("error creating a map cleaner for http2 dynamic table: %w", err)
}
diff --git a/pkg/network/protocols/http2/protocol.go b/pkg/network/protocols/http2/protocol.go
index 283a1abc1714e..433f55e15715e 100644
--- a/pkg/network/protocols/http2/protocol.go
+++ b/pkg/network/protocols/http2/protocol.go
@@ -59,10 +59,8 @@ const (
eosParserTailCall = "socket__http2_eos_parser"
eventStream = "http2"
- // TelemetryMap is the name of the map used to retrieve plaintext metrics from the kernel
+ // TelemetryMap is the name of the map that collects telemetry for plaintext and TLS encrypted HTTP/2 traffic.
TelemetryMap = "http2_telemetry"
- // TLSTelemetryMap is the name of the map used to retrieve metrics from the eBPF probes for TLS
- TLSTelemetryMap = "tls_http2_telemetry"
tlsFirstFrameTailCall = "uprobe__http2_tls_handle_first_frame"
tlsFilterTailCall = "uprobe__http2_tls_filter"
@@ -316,13 +314,8 @@ func (p *Protocol) updateKernelTelemetry(mgr *manager.Manager) {
return
}
- tlsMap, err := protocols.GetMap(mgr, TLSTelemetryMap)
- if err != nil {
- log.Warn(err)
- return
- }
-
- var zero uint32
+ plaintextKey := uint32(0)
+ tlsKey := uint32(1)
http2Telemetry := &HTTP2Telemetry{}
ticker := time.NewTicker(30 * time.Second)
@@ -332,14 +325,14 @@ func (p *Protocol) updateKernelTelemetry(mgr *manager.Manager) {
for {
select {
case <-ticker.C:
- if err := mp.Lookup(unsafe.Pointer(&zero), unsafe.Pointer(http2Telemetry)); err != nil {
+ if err := mp.Lookup(unsafe.Pointer(&plaintextKey), unsafe.Pointer(http2Telemetry)); err != nil {
log.Errorf("unable to lookup %q map: %s", TelemetryMap, err)
return
}
p.http2Telemetry.update(http2Telemetry, false)
- if err := tlsMap.Lookup(unsafe.Pointer(&zero), unsafe.Pointer(http2Telemetry)); err != nil {
- log.Errorf("unable to lookup %q map: %s", TLSTelemetryMap, err)
+ if err := mp.Lookup(unsafe.Pointer(&tlsKey), unsafe.Pointer(http2Telemetry)); err != nil {
+ log.Errorf("unable to lookup %q map: %s", TelemetryMap, err)
return
}
p.http2Telemetry.update(http2Telemetry, true)
@@ -408,7 +401,7 @@ func (p *Protocol) setupHTTP2InFlightMapCleaner(mgr *manager.Manager) {
log.Errorf("error getting %q map: %s", InFlightMap, err)
return
}
- mapCleaner, err := ddebpf.NewMapCleaner[HTTP2StreamKey, HTTP2Stream](http2Map, 1024)
+ mapCleaner, err := ddebpf.NewMapCleaner[HTTP2StreamKey, HTTP2Stream](http2Map, 1024, InFlightMap, "usm_monitor")
if err != nil {
log.Errorf("error creating map cleaner: %s", err)
return
diff --git a/pkg/network/protocols/kafka/protocol.go b/pkg/network/protocols/kafka/protocol.go
index 41d0dc72b3854..54bcb67ff5fdf 100644
--- a/pkg/network/protocols/kafka/protocol.go
+++ b/pkg/network/protocols/kafka/protocol.go
@@ -51,7 +51,6 @@ const (
kafkaHeapMap = "kafka_heap"
inFlightMap = "kafka_in_flight"
responseMap = "kafka_response"
- telemetryMap = "kafka_telemetry"
tlsFilterTailCall = "uprobe__kafka_tls_filter"
@@ -88,7 +87,7 @@ var Spec = &protocols.ProtocolSpec{
Name: "kafka_topic_name",
},
{
- Name: telemetryMap,
+ Name: eBPFTelemetryMap,
},
{
Name: "kafka_batch_events",
@@ -312,7 +311,7 @@ func (p *protocol) DumpMaps(w io.Writer, mapName string, currentMap *ebpf.Map) {
for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) {
spew.Fdump(w, key, value)
}
- case telemetryMap:
+ case eBPFTelemetryMap:
var zeroKey uint32
var value RawKernelTelemetry
@@ -332,11 +331,11 @@ func (p *protocol) processKafka(events []EbpfTx) {
}
func (p *protocol) setupInFlightMapCleaner(mgr *manager.Manager) error {
- inFlightMap, _, err := mgr.GetMap(inFlightMap)
+ kafkaInFlight, _, err := mgr.GetMap(inFlightMap)
if err != nil {
return err
}
- mapCleaner, err := ddebpf.NewMapCleaner[KafkaTransactionKey, KafkaTransaction](inFlightMap, 1024)
+ mapCleaner, err := ddebpf.NewMapCleaner[KafkaTransactionKey, KafkaTransaction](kafkaInFlight, 1024, inFlightMap, "usm_monitor")
if err != nil {
return err
}
diff --git a/pkg/network/protocols/postgres/protocol.go b/pkg/network/protocols/postgres/protocol.go
index fd044eb715055..6bc245896a91a 100644
--- a/pkg/network/protocols/postgres/protocol.go
+++ b/pkg/network/protocols/postgres/protocol.go
@@ -231,7 +231,7 @@ func (p *protocol) setupMapCleaner(mgr *manager.Manager) {
log.Errorf("error getting %s map: %s", InFlightMap, err)
return
}
- mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, postgresebpf.EbpfTx](postgresInflight, 1024)
+ mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, postgresebpf.EbpfTx](postgresInflight, 1024, InFlightMap, "usm_monitor")
if err != nil {
log.Errorf("error creating map cleaner: %s", err)
return
diff --git a/pkg/network/protocols/redis/protocol.go b/pkg/network/protocols/redis/protocol.go
index 06d19bf57aa5a..831d019b30416 100644
--- a/pkg/network/protocols/redis/protocol.go
+++ b/pkg/network/protocols/redis/protocol.go
@@ -174,7 +174,7 @@ func (p *protocol) setupMapCleaner(mgr *manager.Manager) {
return
}
- mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, EbpfTx](redisInFlight, 1024)
+ mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, EbpfTx](redisInFlight, 1024, inFlightMap, "usm_monitor")
if err != nil {
log.Errorf("error creating map cleaner: %s", err)
return
diff --git a/pkg/network/protocols/testutil/serverutils.go b/pkg/network/protocols/testutil/serverutils.go
index d6476dc4fbf5c..1bced2d2d7a37 100644
--- a/pkg/network/protocols/testutil/serverutils.go
+++ b/pkg/network/protocols/testutil/serverutils.go
@@ -34,7 +34,11 @@ func GetDockerPID(dockerName string) (int64, error) {
if err := c.Run(); err != nil {
return 0, fmt.Errorf("failed to get %s pid: %s", dockerName, stderr.String())
}
- return strconv.ParseInt(strings.TrimSpace(stdout.String()), 10, 64)
+ pid, err := strconv.ParseInt(strings.TrimSpace(stdout.String()), 10, 64)
+ if pid == 0 {
+ return 0, fmt.Errorf("failed to retrieve %s pid, container is not running", dockerName)
+ }
+ return pid, err
}
// RunDockerServer is a template for running a protocols server in a docker.
diff --git a/pkg/network/protocols/tls/nodejs/nodejs.go b/pkg/network/protocols/tls/nodejs/nodejs.go
index 5d2e225829131..09ee5ecebf497 100644
--- a/pkg/network/protocols/tls/nodejs/nodejs.go
+++ b/pkg/network/protocols/tls/nodejs/nodejs.go
@@ -3,6 +3,8 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
+//go:build test
+
// Package nodejs provides helpers to run nodejs HTTPs server.
package nodejs
diff --git a/pkg/network/tracer/connection/ebpf_tracer.go b/pkg/network/tracer/connection/ebpf_tracer.go
index c305adfacaf33..1ec93380f4462 100644
--- a/pkg/network/tracer/connection/ebpf_tracer.go
+++ b/pkg/network/tracer/connection/ebpf_tracer.go
@@ -703,7 +703,7 @@ func (t *ebpfTracer) setupMapCleaner(m *manager.Manager) {
return
}
- tcpOngoingConnectPidCleaner, err := ddebpf.NewMapCleaner[netebpf.SkpConn, netebpf.PidTs](tcpOngoingConnectPidMap, 1024)
+ tcpOngoingConnectPidCleaner, err := ddebpf.NewMapCleaner[netebpf.SkpConn, netebpf.PidTs](tcpOngoingConnectPidMap, 1024, probes.TCPOngoingConnectPid, "npm_tracer")
if err != nil {
log.Errorf("error creating map cleaner: %s", err)
return
@@ -724,7 +724,7 @@ func (t *ebpfTracer) setupMapCleaner(m *manager.Manager) {
if err != nil {
log.Errorf("error getting %v map: %s", probes.ConnCloseFlushed, err)
}
- connCloseFlushCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, int64](connCloseFlushMap, 1024)
+ connCloseFlushCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, int64](connCloseFlushMap, 1024, probes.ConnCloseFlushed, "npm_tracer")
if err != nil {
log.Errorf("error creating map cleaner: %s", err)
return
diff --git a/pkg/network/tracer/connection/ebpfless/tcp_processor.go b/pkg/network/tracer/connection/ebpfless/tcp_processor.go
new file mode 100644
index 0000000000000..d1d6768b6fb9f
--- /dev/null
+++ b/pkg/network/tracer/connection/ebpfless/tcp_processor.go
@@ -0,0 +1,203 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package ebpfless
+
+import (
+ "fmt"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/google/gopacket/layers"
+
+ "github.com/DataDog/datadog-agent/pkg/network"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+type connectionState struct {
+ tcpState ConnStatus
+
+ // hasSentPacket is whether anything has been sent outgoing (aka whether maxSeqSent exists)
+ hasSentPacket bool
+ // maxSeqSent is the latest outgoing tcp.Seq if hasSentPacket==true
+ maxSeqSent uint32
+
+ // hasLocalAck is whether there have been outgoing ACK's
+ hasLocalAck bool
+ // lastLocalAck is the latest outgoing tcp.Ack if hasLocalAck
+ lastLocalAck uint32
+ // hasRemoteAck is whether there have been incoming ACK's
+ hasRemoteAck bool
+ // lastRemoteAck is the latest incoming tcp.Ack if hasRemoteAck
+ lastRemoteAck uint32
+
+ // localSynState is the status of the outgoing SYN handshake
+ localSynState SynState
+ // remoteSynState is the status of the incoming SYN handshake
+ remoteSynState SynState
+
+ // hasLocalFin is whether the outgoing side has FIN'd
+ hasLocalFin bool
+ // hasRemoteFin is whether the incoming side has FIN'd
+ hasRemoteFin bool
+ // localFinSeq is the tcp.Seq number for the outgoing FIN (including any payload length)
+ localFinSeq uint32
+ // remoteFinSeq is the tcp.Seq number for the incoming FIN (including any payload length)
+ remoteFinSeq uint32
+}
+
+type TCPProcessor struct {
+ conns map[network.ConnectionTuple]connectionState
+}
+
+func NewTCPProcessor() *TCPProcessor {
+ return &TCPProcessor{
+ conns: map[network.ConnectionTuple]connectionState{},
+ }
+}
+
+func (t *TCPProcessor) updateSynFlag(conn *network.ConnectionStats, st *connectionState, pktType uint8, tcp *layers.TCP, payloadLen uint16) {
+ if tcp.RST {
+ return
+ }
+ // progress the synStates based off this packet
+ if pktType == unix.PACKET_OUTGOING {
+ st.localSynState.update(tcp.SYN, tcp.ACK)
+ } else {
+ st.remoteSynState.update(tcp.SYN, tcp.ACK)
+ }
+ // if any SynState has progressed, move to attempted
+ if st.tcpState == ConnStatClosed && (st.localSynState != SynStateNone || st.remoteSynState != SynStateNone) {
+ st.tcpState = ConnStatAttempted
+ }
+ // if both synStates are ack'd, move to established
+ if st.tcpState == ConnStatAttempted && st.localSynState == SynStateAcked && st.remoteSynState == SynStateAcked {
+ st.tcpState = ConnStatEstablished
+ conn.Monotonic.TCPEstablished++
+ }
+}
+
+// updateTcpStats is designed to mirror the stat tracking in the windows driver's handleFlowProtocolTcp
+// https://github.com/DataDog/datadog-windows-filter/blob/d7560d83eb627117521d631a4c05cd654a01987e/ddfilter/flow/flow_tcp.c#L91
+func (t *TCPProcessor) updateTcpStats(conn *network.ConnectionStats, st *connectionState, pktType uint8, tcp *layers.TCP, payloadLen uint16) {
+ payloadSeq := tcp.Seq + uint32(payloadLen)
+
+ if pktType == unix.PACKET_OUTGOING {
+ conn.Monotonic.SentPackets++
+ if !st.hasSentPacket || isSeqBefore(st.maxSeqSent, payloadSeq) {
+ st.hasSentPacket = true
+ conn.Monotonic.SentBytes += uint64(payloadLen)
+ st.maxSeqSent = payloadSeq
+ }
+
+ ackOutdated := !st.hasLocalAck || isSeqBefore(st.lastLocalAck, tcp.Ack)
+ if tcp.ACK && ackOutdated {
+ // wait until data comes in via SynStateAcked
+ if st.hasLocalAck && st.remoteSynState == SynStateAcked {
+ ackDiff := tcp.Ack - st.lastLocalAck
+ // if this is ack'ing a fin packet, there is an extra sequence number to cancel out
+ isFinAck := st.hasRemoteFin && tcp.Ack == st.remoteFinSeq+1
+ if isFinAck {
+ ackDiff--
+ }
+ conn.Monotonic.RecvBytes += uint64(ackDiff)
+ }
+
+ st.hasLocalAck = true
+ st.lastLocalAck = tcp.Ack
+ }
+ } else {
+ conn.Monotonic.RecvPackets++
+
+ ackOutdated := !st.hasRemoteAck || isSeqBefore(st.lastRemoteAck, tcp.Ack)
+ if tcp.ACK && ackOutdated {
+ st.hasRemoteAck = true
+ st.lastRemoteAck = tcp.Ack
+ }
+ }
+}
+
+func (t *TCPProcessor) updateFinFlag(conn *network.ConnectionStats, st *connectionState, pktType uint8, tcp *layers.TCP, payloadLen uint16) {
+ payloadSeq := tcp.Seq + uint32(payloadLen)
+ // update FIN sequence numbers
+ if tcp.FIN {
+ if pktType == unix.PACKET_OUTGOING {
+ st.hasLocalFin = true
+ st.localFinSeq = payloadSeq
+ } else {
+ st.hasRemoteFin = true
+ st.remoteFinSeq = payloadSeq
+ }
+ }
+
+ // if both fins have been sent and ack'd, then mark the connection closed
+ localFinIsAcked := st.hasLocalFin && isSeqBefore(st.localFinSeq, st.lastRemoteAck)
+ remoteFinIsAcked := st.hasRemoteFin && isSeqBefore(st.remoteFinSeq, st.lastLocalAck)
+ if st.tcpState == ConnStatEstablished && localFinIsAcked && remoteFinIsAcked {
+ *st = connectionState{
+ tcpState: ConnStatClosed,
+ }
+ conn.Monotonic.TCPClosed++
+ }
+}
+
+func (t *TCPProcessor) updateRstFlag(conn *network.ConnectionStats, st *connectionState, pktType uint8, tcp *layers.TCP, payloadLen uint16) {
+ if !tcp.RST || st.tcpState == ConnStatClosed {
+ return
+ }
+
+ reason := syscall.ECONNRESET
+ if st.tcpState == ConnStatAttempted {
+ reason = syscall.ECONNREFUSED
+ }
+
+ *st = connectionState{
+ tcpState: ConnStatClosed,
+ }
+ conn.TCPFailures[uint16(reason)]++
+ conn.Monotonic.TCPClosed++
+}
+
+// Process handles a TCP packet, calculating stats and keeping track of its state according to the
+// TCP state machine.
+func (t *TCPProcessor) Process(conn *network.ConnectionStats, pktType uint8, ip4 *layers.IPv4, ip6 *layers.IPv6, tcp *layers.TCP) error {
+ if pktType != unix.PACKET_OUTGOING && pktType != unix.PACKET_HOST {
+ return fmt.Errorf("TCPProcessor saw invalid pktType: %d", pktType)
+ }
+ payloadLen, err := TCPPayloadLen(conn.Family, ip4, ip6, tcp)
+ if err != nil {
+ return err
+ }
+
+ log.TraceFunc(func() string {
+ return "tcp processor: " + debugPacketInfo(pktType, tcp, payloadLen)
+ })
+
+ // skip invalid packets we don't recognize:
+ noFlagsCombo := !tcp.SYN && !tcp.FIN && !tcp.ACK && !tcp.RST
+ if noFlagsCombo {
+ // no flags at all (I think this can happen for expanding the TCP window sometimes?)
+ statsTelemetry.missingTCPFlags.Inc()
+ return nil
+ }
+ synFinCombo := tcp.SYN && tcp.FIN
+ if synFinCombo {
+ statsTelemetry.tcpSynAndFin.Inc()
+ return nil
+ }
+
+ st := t.conns[conn.ConnectionTuple]
+
+ t.updateSynFlag(conn, &st, pktType, tcp, payloadLen)
+ t.updateTcpStats(conn, &st, pktType, tcp, payloadLen)
+ t.updateFinFlag(conn, &st, pktType, tcp, payloadLen)
+ t.updateRstFlag(conn, &st, pktType, tcp, payloadLen)
+
+ t.conns[conn.ConnectionTuple] = st
+ return nil
+}
diff --git a/pkg/network/tracer/connection/ebpfless/tcp_processor_test.go b/pkg/network/tracer/connection/ebpfless/tcp_processor_test.go
new file mode 100644
index 0000000000000..eba649cf72803
--- /dev/null
+++ b/pkg/network/tracer/connection/ebpfless/tcp_processor_test.go
@@ -0,0 +1,809 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package ebpfless
+
+import (
+ "net"
+ "syscall"
+ "testing"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/google/gopacket/layers"
+ "github.com/stretchr/testify/require"
+
+ "github.com/DataDog/datadog-agent/pkg/network"
+ "github.com/DataDog/datadog-agent/pkg/process/util"
+)
+
+var localhost net.IP = net.ParseIP("127.0.0.1")
+var remoteIP net.IP = net.ParseIP("12.34.56.78")
+
+const (
+ minIhl = 5
+ defaultLocalPort = 12345
+ defaultRemotePort = 8080
+ defaultNsId = 123
+)
+
+const (
+ FIN = 0x01
+ SYN = 0x02
+ RST = 0x04
+ ACK = 0x10
+)
+
+func ipv4Packet(src, dst net.IP, length uint16) layers.IPv4 {
+ return layers.IPv4{
+ Version: 4,
+ IHL: minIhl,
+ TOS: 0,
+ Length: length,
+ Id: 12345,
+ Flags: 0x02,
+ FragOffset: 0,
+ TTL: 64,
+ Protocol: 6,
+ Checksum: 0xbeef,
+ SrcIP: src,
+ DstIP: dst,
+ }
+}
+
+func tcpPacket(srcPort, dstPort uint16, seq, ack uint32, flags uint8) layers.TCP {
+ return layers.TCP{
+ DataOffset: 5,
+ Window: 65535,
+ SrcPort: layers.TCPPort(srcPort),
+ DstPort: layers.TCPPort(dstPort),
+ Seq: seq,
+ Ack: ack,
+ FIN: flags&FIN != 0,
+ SYN: flags&SYN != 0,
+ RST: flags&RST != 0,
+ ACK: flags&ACK != 0,
+ }
+}
+
+type testCapture struct {
+ pktType uint8
+ ipv4 *layers.IPv4
+ ipv6 *layers.IPv6
+ tcp *layers.TCP
+}
+
+func (tc testCapture) reverse() testCapture {
+ ret := tc
+ if tc.pktType == unix.PACKET_HOST {
+ ret.pktType = unix.PACKET_OUTGOING
+ } else {
+ ret.pktType = unix.PACKET_HOST
+ }
+ if tc.ipv4 != nil {
+ ipv4 := *tc.ipv4
+ ipv4.SrcIP, ipv4.DstIP = ipv4.DstIP, ipv4.SrcIP
+ ret.ipv4 = &ipv4
+ }
+ if tc.ipv6 != nil {
+ ipv6 := *tc.ipv6
+ ipv6.SrcIP, ipv6.DstIP = ipv6.DstIP, ipv6.SrcIP
+ ret.ipv6 = &ipv6
+ }
+ tcp := *tc.tcp
+ tcp.SrcPort, tcp.DstPort = tcp.DstPort, tcp.SrcPort
+ ret.tcp = &tcp
+ return ret
+}
+func reversePkts(tc []testCapture) []testCapture {
+ var ret []testCapture
+ for _, t := range tc {
+ ret = append(ret, t.reverse())
+ }
+ return ret
+}
+
+// TODO can this be merged with the logic creating scratchConns in ebpfless tracer?
+func makeTcpStates(synPkt testCapture) *network.ConnectionStats {
+ var family network.ConnectionFamily
+ var srcIP, dstIP net.IP
+ if synPkt.ipv4 != nil && synPkt.ipv6 != nil {
+ panic("testCapture shouldn't have both IP families")
+ }
+ if synPkt.ipv4 != nil {
+ family = network.AFINET
+ srcIP = synPkt.ipv4.SrcIP
+ dstIP = synPkt.ipv4.DstIP
+ } else if synPkt.ipv6 != nil {
+ family = network.AFINET6
+ srcIP = synPkt.ipv6.SrcIP
+ dstIP = synPkt.ipv6.DstIP
+ } else {
+ panic("testCapture had no IP family")
+ }
+ var direction network.ConnectionDirection
+ switch synPkt.pktType {
+ case unix.PACKET_HOST:
+ direction = network.INCOMING
+ case unix.PACKET_OUTGOING:
+ direction = network.OUTGOING
+ default:
+ panic("testCapture had unknown packet type")
+ }
+ return &network.ConnectionStats{
+ ConnectionTuple: network.ConnectionTuple{
+ Source: util.AddressFromNetIP(srcIP),
+ Dest: util.AddressFromNetIP(dstIP),
+ Pid: 0, // @stu we can't know this right
+ NetNS: defaultNsId,
+ SPort: uint16(synPkt.tcp.SrcPort),
+ DPort: uint16(synPkt.tcp.DstPort),
+ Type: network.TCP,
+ Family: family,
+ },
+ Direction: direction,
+ TCPFailures: make(map[uint16]uint32),
+ }
+}
+
+type tcpTestFixture struct {
+ t *testing.T
+ tcp *TCPProcessor
+ conn *network.ConnectionStats
+ localSeqBase, remoteSeqBase uint32
+}
+
+const TCP_HEADER_SIZE = 20
+
+func (fixture *tcpTestFixture) incoming(payloadLen uint16, relSeq, relAck uint32, flags uint8) testCapture {
+ ipv4 := ipv4Packet(remoteIP, localhost, minIhl*4+TCP_HEADER_SIZE+payloadLen)
+ seq := relSeq + fixture.localSeqBase
+ ack := relAck + fixture.remoteSeqBase
+ tcp := tcpPacket(defaultRemotePort, defaultLocalPort, seq, ack, flags)
+ return testCapture{
+ pktType: unix.PACKET_HOST,
+ ipv4: &ipv4,
+ ipv6: nil,
+ tcp: &tcp,
+ }
+}
+
+func (fixture *tcpTestFixture) outgoing(payloadLen uint16, relSeq, relAck uint32, flags uint8) testCapture {
+ ipv4 := ipv4Packet(localhost, remoteIP, minIhl*4+TCP_HEADER_SIZE+payloadLen)
+ seq := relSeq + fixture.remoteSeqBase
+ ack := relAck + fixture.localSeqBase
+ tcp := tcpPacket(defaultLocalPort, defaultRemotePort, seq, ack, flags)
+ return testCapture{
+ pktType: unix.PACKET_OUTGOING,
+ ipv4: &ipv4,
+ ipv6: nil,
+ tcp: &tcp,
+ }
+}
+
+func newTcpTestFixture(t *testing.T, localSeqBase, remoteSeqBase uint32) *tcpTestFixture {
+ return &tcpTestFixture{
+ t: t,
+ tcp: NewTCPProcessor(),
+ conn: nil,
+ localSeqBase: localSeqBase,
+ remoteSeqBase: remoteSeqBase,
+ }
+}
+
+func (fixture *tcpTestFixture) runPkt(pkt testCapture) {
+ if fixture.conn == nil {
+ fixture.conn = makeTcpStates(pkt)
+ }
+ err := fixture.tcp.Process(fixture.conn, pkt.pktType, pkt.ipv4, pkt.ipv6, pkt.tcp)
+ require.NoError(fixture.t, err)
+}
+
+func (fixture *tcpTestFixture) runPkts(packets []testCapture) {
+ for _, pkt := range packets {
+ fixture.runPkt(pkt)
+ }
+}
+
+func (fixture *tcpTestFixture) runAgainstState(packets []testCapture, expected []ConnStatus) {
+ require.Equal(fixture.t, len(packets), len(expected), "packet length didn't match expected states length")
+ var expectedStrs []string
+ var actualStrs []string
+
+ for i, pkt := range packets {
+ expectedStrs = append(expectedStrs, LabelForState(expected[i]))
+
+ fixture.runPkt(pkt)
+ connTuple := fixture.conn.ConnectionTuple
+ actual := fixture.tcp.conns[connTuple].tcpState
+ actualStrs = append(actualStrs, LabelForState(actual))
+ }
+ require.Equal(fixture.t, expectedStrs, actualStrs)
+}
+
+func testBasicHandshake(t *testing.T, f *tcpTestFixture) {
+
+ basicHandshake := []testCapture{
+ f.outgoing(0, 0, 0, SYN),
+ f.incoming(0, 0, 1, SYN|ACK),
+ // separate ack and first send of data
+ f.outgoing(0, 1, 1, ACK),
+ f.outgoing(123, 1, 1, ACK),
+ // acknowledge data separately
+ f.incoming(0, 1, 124, ACK),
+ f.incoming(345, 1, 124, ACK),
+ // remote FINs separately
+ f.incoming(0, 346, 124, FIN|ACK),
+ // local acknowledges data, (not the FIN)
+ f.outgoing(0, 124, 346, ACK),
+ // local acknowledges FIN and sends their own
+ f.outgoing(0, 124, 347, FIN|ACK),
+ // remote sends final ACK
+ f.incoming(0, 347, 125, ACK),
+ }
+
+ expectedClientStates := []ConnStatus{
+ ConnStatAttempted,
+ ConnStatAttempted,
+ // three-way handshake finishes here
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ // passive close begins here
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ // final FIN was ack'd
+ ConnStatClosed,
+ }
+
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ require.Empty(t, f.conn.TCPFailures)
+
+ expectedStats := network.StatCounters{
+ SentBytes: 123,
+ RecvBytes: 345,
+ SentPackets: 5,
+ RecvPackets: 5,
+ Retransmits: 0,
+ TCPEstablished: 1,
+ TCPClosed: 1,
+ }
+
+ require.Equal(t, expectedStats, f.conn.Monotonic)
+}
+
+var lowerSeq uint32 = 2134452051
+var higherSeq uint32 = 2973263073
+
+func TestBasicHandshake(t *testing.T) {
+ t.Run("localSeq lt remoteSeq", func(t *testing.T) {
+ f := newTcpTestFixture(t, lowerSeq, higherSeq)
+ testBasicHandshake(t, f)
+ })
+
+ t.Run("localSeq gt remoteSeq", func(t *testing.T) {
+ f := newTcpTestFixture(t, higherSeq, lowerSeq)
+ testBasicHandshake(t, f)
+ })
+}
+
+func testReversedBasicHandshake(t *testing.T, f *tcpTestFixture) {
+ basicHandshake := []testCapture{
+ f.incoming(0, 0, 0, SYN),
+ f.outgoing(0, 0, 1, SYN|ACK),
+ // separate ack and first send of data
+ f.incoming(0, 1, 1, ACK),
+ f.incoming(123, 1, 1, ACK),
+ // acknowledge data separately
+ f.outgoing(0, 1, 124, ACK),
+ f.outgoing(345, 1, 124, ACK),
+ // local FINs separately
+ f.outgoing(0, 346, 124, FIN|ACK),
+ // remote acknowledges data, (not the FIN)
+ f.incoming(0, 124, 346, ACK),
+ // remote acknowledges FIN and sends their own
+ f.incoming(0, 124, 347, FIN|ACK),
+ // local sends final ACK
+ f.outgoing(0, 347, 125, ACK),
+ }
+
+ expectedClientStates := []ConnStatus{
+ ConnStatAttempted,
+ ConnStatAttempted,
+ // three-way handshake finishes here
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ // active close begins here
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatClosed,
+ }
+
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ require.Empty(t, f.conn.TCPFailures)
+
+ expectedStats := network.StatCounters{
+ SentBytes: 345,
+ RecvBytes: 123,
+ SentPackets: 5,
+ RecvPackets: 5,
+ Retransmits: 0,
+ TCPEstablished: 1,
+ TCPClosed: 1,
+ }
+ require.Equal(t, expectedStats, f.conn.Monotonic)
+}
+
+func TestReversedBasicHandshake(t *testing.T) {
+ t.Run("localSeq lt remoteSeq", func(t *testing.T) {
+ f := newTcpTestFixture(t, lowerSeq, higherSeq)
+ testReversedBasicHandshake(t, f)
+ })
+
+ t.Run("localSeq gt remoteSeq", func(t *testing.T) {
+ f := newTcpTestFixture(t, higherSeq, lowerSeq)
+ testReversedBasicHandshake(t, f)
+ })
+}
+
+func testCloseWaitState(t *testing.T, f *tcpTestFixture) {
+ // test the CloseWait state, which is when the local client still has data left
+ // to send during a passive close
+
+ basicHandshake := []testCapture{
+ f.outgoing(0, 0, 0, SYN),
+ f.incoming(0, 0, 1, SYN|ACK),
+ // local sends data right out the gate with ACK
+ f.outgoing(123, 1, 1, ACK),
+ // remote acknowledges and sends data back
+ f.incoming(345, 1, 124, ACK),
+ // remote FINs separately
+ f.incoming(0, 346, 124, FIN|ACK),
+ // local acknowledges FIN, but keeps sending data for a bit
+ f.outgoing(100, 124, 347, ACK),
+ // client finally FINACKs
+ f.outgoing(42, 224, 347, FIN|ACK),
+ // remote acknowledges data but not including the FIN
+ f.incoming(0, 347, 224, ACK),
+ // server sends final ACK
+ f.incoming(0, 347, 224+42+1, ACK),
+ }
+
+ expectedClientStates := []ConnStatus{
+ ConnStatAttempted,
+ ConnStatAttempted,
+ // three-way handshake finishes here
+ ConnStatEstablished,
+ ConnStatEstablished,
+ // passive close begins here
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatClosed,
+ }
+
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ require.Empty(t, f.conn.TCPFailures)
+
+ expectedStats := network.StatCounters{
+ SentBytes: 123 + 100 + 42,
+ RecvBytes: 345,
+ SentPackets: 4,
+ RecvPackets: 5,
+ Retransmits: 0,
+ TCPEstablished: 1,
+ TCPClosed: 1,
+ }
+ require.Equal(t, expectedStats, f.conn.Monotonic)
+}
+
+func TestCloseWaitState(t *testing.T) {
+ t.Run("localSeq lt remoteSeq", func(t *testing.T) {
+ f := newTcpTestFixture(t, lowerSeq, higherSeq)
+ testCloseWaitState(t, f)
+ })
+
+ t.Run("localSeq gt remoteSeq", func(t *testing.T) {
+ f := newTcpTestFixture(t, higherSeq, lowerSeq)
+ testCloseWaitState(t, f)
+ })
+}
+
+func testFinWait2State(t *testing.T, f *tcpTestFixture) {
+ // test the FinWait2 state, which is when the remote still has data left
+ // to send during an active close
+
+ basicHandshake := []testCapture{
+ f.incoming(0, 0, 0, SYN),
+ f.outgoing(0, 0, 1, SYN|ACK),
+ // separate ack and first send of data
+ f.incoming(0, 1, 1, ACK),
+ f.incoming(123, 1, 1, ACK),
+ // acknowledge data separately
+ f.outgoing(0, 1, 124, ACK),
+ f.outgoing(345, 1, 124, ACK),
+ // local FINs separately
+ f.outgoing(0, 346, 124, FIN|ACK),
+ // remote acknowledges the FIN but keeps sending data
+ f.incoming(100, 124, 347, ACK),
+ // local acknowledges this data
+ f.outgoing(0, 347, 224, ACK),
+ // remote sends their own FIN
+ f.incoming(0, 224, 347, FIN|ACK),
+ // local sends final ACK
+ f.outgoing(0, 347, 225, ACK),
+ }
+
+ expectedClientStates := []ConnStatus{
+ ConnStatAttempted,
+ ConnStatAttempted,
+ // three-way handshake finishes here
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ // active close begins here
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatClosed,
+ }
+
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ require.Empty(t, f.conn.TCPFailures)
+
+ expectedStats := network.StatCounters{
+ SentBytes: 345,
+ RecvBytes: 223,
+ SentPackets: 6,
+ RecvPackets: 5,
+ Retransmits: 0,
+ TCPEstablished: 1,
+ TCPClosed: 1,
+ }
+ require.Equal(t, expectedStats, f.conn.Monotonic)
+}
+
+func TestFinWait2State(t *testing.T) {
+ t.Run("localSeq lt remoteSeq", func(t *testing.T) {
+ f := newTcpTestFixture(t, lowerSeq, higherSeq)
+ testFinWait2State(t, f)
+ })
+
+ t.Run("localSeq gt remoteSeq", func(t *testing.T) {
+ f := newTcpTestFixture(t, higherSeq, lowerSeq)
+ testFinWait2State(t, f)
+ })
+}
+
+func TestImmediateFin(t *testing.T) {
+ // originally captured from TestTCPConnsReported which closes connections right as it gets them
+
+ f := newTcpTestFixture(t, lowerSeq, higherSeq)
+
+ basicHandshake := []testCapture{
+ f.incoming(0, 0, 0, SYN),
+ f.outgoing(0, 0, 1, SYN|ACK),
+ f.incoming(0, 1, 1, ACK),
+ // active close after sending no data
+ f.outgoing(0, 1, 1, FIN|ACK),
+ f.incoming(0, 1, 2, FIN|ACK),
+ f.outgoing(0, 2, 2, ACK),
+ }
+
+ expectedClientStates := []ConnStatus{
+ ConnStatAttempted,
+ ConnStatAttempted,
+ ConnStatEstablished,
+ // active close begins here
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatClosed,
+ }
+
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ require.Empty(t, f.conn.TCPFailures)
+
+ expectedStats := network.StatCounters{
+ SentBytes: 0,
+ RecvBytes: 0,
+ SentPackets: 3,
+ RecvPackets: 3,
+ Retransmits: 0,
+ TCPEstablished: 1,
+ TCPClosed: 1,
+ }
+ require.Equal(t, expectedStats, f.conn.Monotonic)
+}
+
+func TestConnRefusedSyn(t *testing.T) {
+ f := newTcpTestFixture(t, lowerSeq, higherSeq)
+
+ basicHandshake := []testCapture{
+ f.incoming(0, 0, 0, SYN),
+ f.outgoing(0, 0, 0, RST|ACK),
+ }
+
+ expectedClientStates := []ConnStatus{
+ ConnStatAttempted,
+ ConnStatClosed,
+ }
+
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ require.Equal(t, f.conn.TCPFailures, map[uint16]uint32{
+ uint16(syscall.ECONNREFUSED): 1,
+ })
+
+ expectedStats := network.StatCounters{
+ SentBytes: 0,
+ RecvBytes: 0,
+ SentPackets: 1,
+ RecvPackets: 1,
+ Retransmits: 0,
+ TCPEstablished: 0,
+ TCPClosed: 1,
+ }
+ require.Equal(t, expectedStats, f.conn.Monotonic)
+}
+
+func TestConnRefusedSynAck(t *testing.T) {
+ f := newTcpTestFixture(t, lowerSeq, higherSeq)
+
+ basicHandshake := []testCapture{
+ f.incoming(0, 0, 0, SYN),
+ f.outgoing(0, 0, 1, SYN|ACK),
+ f.outgoing(0, 0, 0, RST|ACK),
+ }
+
+ expectedClientStates := []ConnStatus{
+ ConnStatAttempted,
+ ConnStatAttempted,
+ ConnStatClosed,
+ }
+
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ require.Equal(t, f.conn.TCPFailures, map[uint16]uint32{
+ uint16(syscall.ECONNREFUSED): 1,
+ })
+
+ expectedStats := network.StatCounters{
+ SentBytes: 0,
+ RecvBytes: 0,
+ SentPackets: 2,
+ RecvPackets: 1,
+ Retransmits: 0,
+ TCPEstablished: 0,
+ TCPClosed: 1,
+ }
+ require.Equal(t, expectedStats, f.conn.Monotonic)
+}
+
+func TestConnReset(t *testing.T) {
+ f := newTcpTestFixture(t, lowerSeq, higherSeq)
+
+ basicHandshake := []testCapture{
+ f.incoming(0, 0, 0, SYN),
+ f.outgoing(0, 0, 1, SYN|ACK),
+ f.incoming(0, 1, 1, ACK),
+ // handshake done, now blow up
+ f.outgoing(0, 1, 1, RST|ACK),
+ }
+
+ expectedClientStates := []ConnStatus{
+ ConnStatAttempted,
+ ConnStatAttempted,
+ ConnStatEstablished,
+ // reset
+ ConnStatClosed,
+ }
+
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ require.Equal(t, f.conn.TCPFailures, map[uint16]uint32{
+ uint16(syscall.ECONNRESET): 1,
+ })
+
+ expectedStats := network.StatCounters{
+ SentBytes: 0,
+ RecvBytes: 0,
+ SentPackets: 2,
+ RecvPackets: 2,
+ Retransmits: 0,
+ TCPEstablished: 1,
+ TCPClosed: 1,
+ }
+ require.Equal(t, expectedStats, f.conn.Monotonic)
+}
+
+func TestRstRetransmit(t *testing.T) {
+ f := newTcpTestFixture(t, lowerSeq, higherSeq)
+
+ basicHandshake := []testCapture{
+ f.incoming(0, 0, 0, SYN),
+ f.outgoing(0, 0, 1, SYN|ACK),
+ f.incoming(0, 1, 1, ACK),
+ // handshake done, now blow up
+ f.outgoing(0, 1, 1, RST|ACK),
+ f.outgoing(0, 1, 1, RST|ACK),
+ }
+
+ expectedClientStates := []ConnStatus{
+ ConnStatAttempted,
+ ConnStatAttempted,
+ ConnStatEstablished,
+ // reset
+ ConnStatClosed,
+ ConnStatClosed,
+ }
+
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ // should count as a single failure
+ require.Equal(t, f.conn.TCPFailures, map[uint16]uint32{
+ uint16(syscall.ECONNRESET): 1,
+ })
+
+ expectedStats := network.StatCounters{
+ SentBytes: 0,
+ RecvBytes: 0,
+ SentPackets: 3,
+ RecvPackets: 2,
+ Retransmits: 0,
+ TCPEstablished: 1,
+ // should count as a single closed connection
+ TCPClosed: 1,
+ }
+ require.Equal(t, expectedStats, f.conn.Monotonic)
+}
+
+func TestConnectTwice(t *testing.T) {
+ // same as TestImmediateFin but everything happens twice
+
+ f := newTcpTestFixture(t, lowerSeq, higherSeq)
+
+ basicHandshake := []testCapture{
+ f.incoming(0, 0, 0, SYN),
+ f.outgoing(0, 0, 1, SYN|ACK),
+ f.incoming(0, 1, 1, ACK),
+ // active close after sending no data
+ f.outgoing(0, 1, 1, FIN|ACK),
+ f.incoming(0, 1, 2, FIN|ACK),
+ f.outgoing(0, 2, 2, ACK),
+ }
+
+ expectedClientStates := []ConnStatus{
+ ConnStatAttempted,
+ ConnStatAttempted,
+ ConnStatEstablished,
+ // active close begins here
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatClosed,
+ }
+
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ state := f.tcp.conns[f.conn.ConnectionTuple]
+ // make sure the TCP state was erased after the connection was closed
+ require.Equal(t, connectionState{
+ tcpState: ConnStatClosed,
+ }, state)
+
+ // second connection here
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ require.Empty(t, f.conn.TCPFailures)
+
+ expectedStats := network.StatCounters{
+ SentBytes: 0,
+ RecvBytes: 0,
+ SentPackets: 3 * 2,
+ RecvPackets: 3 * 2,
+ Retransmits: 0,
+ TCPEstablished: 1 * 2,
+ TCPClosed: 1 * 2,
+ }
+ require.Equal(t, expectedStats, f.conn.Monotonic)
+}
+
+func TestSimultaneousClose(t *testing.T) {
+ f := newTcpTestFixture(t, lowerSeq, higherSeq)
+
+ basicHandshake := []testCapture{
+ f.incoming(0, 0, 0, SYN),
+ f.outgoing(0, 0, 1, SYN|ACK),
+ f.incoming(0, 1, 1, ACK),
+ // active close after sending no data
+ f.outgoing(0, 1, 1, FIN|ACK),
+ f.incoming(0, 1, 1, FIN|ACK),
+ f.outgoing(0, 2, 2, ACK),
+ f.incoming(0, 2, 2, ACK),
+ }
+
+ expectedClientStates := []ConnStatus{
+ ConnStatAttempted,
+ ConnStatAttempted,
+ ConnStatEstablished,
+ // active close begins here
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatClosed,
+ }
+
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ require.Empty(t, f.conn.TCPFailures)
+
+ expectedStats := network.StatCounters{
+ SentBytes: 0,
+ RecvBytes: 0,
+ SentPackets: 3,
+ RecvPackets: 4,
+ Retransmits: 0,
+ TCPEstablished: 1,
+ TCPClosed: 1,
+ }
+ require.Equal(t, expectedStats, f.conn.Monotonic)
+}
+
+func TestUnusualAckSyn(t *testing.T) {
+ // according to zeek, some unusual clients such as ftp.microsoft.com do the ACK and SYN separately
+ f := newTcpTestFixture(t, lowerSeq, higherSeq)
+
+ basicHandshake := []testCapture{
+ f.incoming(0, 0, 0, SYN),
+ // ACK the first SYN before even sending your own SYN
+ f.outgoing(0, 0, 1, ACK),
+ f.outgoing(0, 0, 1, SYN),
+ f.incoming(0, 1, 1, ACK),
+ // active close after sending no data
+ f.outgoing(0, 1, 1, FIN|ACK),
+ f.incoming(0, 1, 2, FIN|ACK),
+ f.outgoing(0, 2, 2, ACK),
+ }
+
+ expectedClientStates := []ConnStatus{
+ ConnStatAttempted,
+ ConnStatAttempted,
+ ConnStatAttempted,
+ ConnStatEstablished,
+ // active close begins here
+ ConnStatEstablished,
+ ConnStatEstablished,
+ ConnStatClosed,
+ }
+
+ f.runAgainstState(basicHandshake, expectedClientStates)
+
+ require.Empty(t, f.conn.TCPFailures)
+
+ expectedStats := network.StatCounters{
+ SentBytes: 0,
+ RecvBytes: 0,
+ SentPackets: 4,
+ RecvPackets: 3,
+ Retransmits: 0,
+ TCPEstablished: 1,
+ TCPClosed: 1,
+ }
+ require.Equal(t, expectedStats, f.conn.Monotonic)
+}
diff --git a/pkg/network/tracer/connection/ebpfless/tcp_utils.go b/pkg/network/tracer/connection/ebpfless/tcp_utils.go
new file mode 100644
index 0000000000000..1f7be6a2db85d
--- /dev/null
+++ b/pkg/network/tracer/connection/ebpfless/tcp_utils.go
@@ -0,0 +1,120 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+//go:build linux_bpf
+
+package ebpfless
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/google/gopacket/layers"
+
+ "github.com/DataDog/datadog-agent/pkg/telemetry"
+)
+
+const ebpflessModuleName = "ebpfless_network_tracer"
+
+var statsTelemetry = struct {
+ missedTCPConnections telemetry.Counter
+ missingTCPFlags telemetry.Counter
+ tcpSynAndFin telemetry.Counter
+}{
+ telemetry.NewCounter(ebpflessModuleName, "missed_tcp_connections", []string{}, "Counter measuring the number of TCP connections where we missed the SYN handshake"),
+ telemetry.NewCounter(ebpflessModuleName, "missing_tcp_flags", []string{}, "Counter measuring packets encountered with none of SYN, FIN, ACK, RST set"),
+ telemetry.NewCounter(ebpflessModuleName, "tcp_syn_and_fin", []string{}, "Counter measuring packets encountered with SYN+FIN together"),
+}
+
+const tcpSeqMidpoint = 0x80000000
+
+type ConnStatus uint8
+
+const (
+ ConnStatClosed ConnStatus = iota
+ ConnStatAttempted
+ ConnStatEstablished
+)
+
+var connStatusLabels = []string{
+ "Closed",
+ "Attempted",
+ "Established",
+}
+
+type SynState uint8
+
+const (
+ SynStateNone SynState = iota
+ SynStateSent
+ SynStateAcked
+)
+
+func (ss *SynState) update(synFlag, ackFlag bool) {
+ // for simplicity, this does not consider the sequence number of the SYNs and ACKs.
+ // if these matter in the future, change this to store SYN seq numbers
+ if *ss == SynStateNone && synFlag {
+ *ss = SynStateSent
+ }
+ if *ss == SynStateSent && ackFlag {
+ *ss = SynStateAcked
+ }
+ // if we see ACK'd traffic but missed the SYN, assume the connection started before
+ // the datadog-agent starts.
+ if *ss == SynStateNone && ackFlag {
+ statsTelemetry.missedTCPConnections.Inc()
+ *ss = SynStateAcked
+ }
+}
+
+func LabelForState(tcpState ConnStatus) string {
+ idx := int(tcpState)
+ if idx < len(connStatusLabels) {
+ return connStatusLabels[idx]
+ }
+ return "BadState-" + strconv.Itoa(idx)
+}
+
+func isSeqBefore(prev, cur uint32) bool {
+ // check for wraparound with unsigned subtraction
+ diff := cur - prev
+ // constrain the maximum difference to half the number space
+ return diff > 0 && diff < tcpSeqMidpoint
+}
+
+func debugPacketDir(pktType uint8) string {
+ switch pktType {
+ case unix.PACKET_HOST:
+ return "Incoming"
+ case unix.PACKET_OUTGOING:
+ return "Outgoing"
+ default:
+ return "InvalidDir-" + strconv.Itoa(int(pktType))
+ }
+}
+
+func debugTcpFlags(tcp *layers.TCP) string {
+ var flags []string
+ if tcp.RST {
+ flags = append(flags, "RST")
+ }
+ if tcp.FIN {
+ flags = append(flags, "FIN")
+ }
+ if tcp.SYN {
+ flags = append(flags, "SYN")
+ }
+ if tcp.ACK {
+ flags = append(flags, "ACK")
+ }
+ return strings.Join(flags, "|")
+}
+
+func debugPacketInfo(pktType uint8, tcp *layers.TCP, payloadLen uint16) string {
+ return fmt.Sprintf("pktType=%+v ports=(%+v, %+v) size=%d seq=%+v ack=%+v flags=%s", debugPacketDir(pktType), uint16(tcp.SrcPort), uint16(tcp.DstPort), payloadLen, tcp.Seq, tcp.Ack, debugTcpFlags(tcp))
+}
diff --git a/pkg/network/tracer/connection/ebpfless_tracer.go b/pkg/network/tracer/connection/ebpfless_tracer.go
index 2a460ed44ce62..c4a5e9db93fdf 100644
--- a/pkg/network/tracer/connection/ebpfless_tracer.go
+++ b/pkg/network/tracer/connection/ebpfless_tracer.go
@@ -56,7 +56,7 @@ type ebpfLessTracer struct {
scratchConn *network.ConnectionStats
udp *udpProcessor
- tcp *tcpProcessor
+ tcp *ebpfless.TCPProcessor
// connection maps
conns map[network.ConnectionTuple]*network.ConnectionStats
@@ -81,7 +81,7 @@ func newEbpfLessTracer(cfg *config.Config) (*ebpfLessTracer, error) {
exit: make(chan struct{}),
scratchConn: &network.ConnectionStats{},
udp: &udpProcessor{},
- tcp: newTCPProcessor(),
+ tcp: ebpfless.NewTCPProcessor(),
conns: make(map[network.ConnectionTuple]*network.ConnectionStats, cfg.MaxTrackedConnections),
boundPorts: ebpfless.NewBoundPorts(cfg),
cookieHasher: newCookieHasher(),
@@ -150,6 +150,7 @@ func (t *ebpfLessTracer) processConnection(
) error {
t.scratchConn.Source, t.scratchConn.Dest = util.Address{}, util.Address{}
t.scratchConn.SPort, t.scratchConn.DPort = 0, 0
+ t.scratchConn.TCPFailures = make(map[uint16]uint32)
var udpPresent, tcpPresent bool
for _, layerType := range decoded {
switch layerType {
@@ -209,7 +210,7 @@ func (t *ebpfLessTracer) processConnection(
if (ip4 != nil && !t.config.CollectTCPv4Conns) || (ip6 != nil && !t.config.CollectTCPv6Conns) {
return nil
}
- err = t.tcp.process(conn, pktType, ip4, ip6, tcp)
+ err = t.tcp.Process(conn, pktType, ip4, ip6, tcp)
default:
err = fmt.Errorf("unsupported connection type %d", conn.Type)
}
@@ -357,63 +358,3 @@ func (u *udpProcessor) process(conn *network.ConnectionStats, pktType uint8, udp
return nil
}
-
-type tcpProcessor struct {
- conns map[network.ConnectionTuple]struct {
- established bool
- closed bool
- }
-}
-
-func newTCPProcessor() *tcpProcessor {
- return &tcpProcessor{
- conns: map[network.ConnectionTuple]struct {
- established bool
- closed bool
- }{},
- }
-}
-
-func (t *tcpProcessor) process(conn *network.ConnectionStats, pktType uint8, ip4 *layers.IPv4, ip6 *layers.IPv6, tcp *layers.TCP) error {
- payloadLen, err := ebpfless.TCPPayloadLen(conn.Family, ip4, ip6, tcp)
- if err != nil {
- return err
- }
-
- log.TraceFunc(func() string {
- return fmt.Sprintf("tcp processor: pktType=%+v seq=%+v ack=%+v fin=%+v rst=%+v syn=%+v ack=%+v", pktType, tcp.Seq, tcp.Ack, tcp.FIN, tcp.RST, tcp.SYN, tcp.ACK)
- })
- c := t.conns[conn.ConnectionTuple]
- log.TraceFunc(func() string {
- return fmt.Sprintf("pre ack_seq=%+v", c)
- })
- switch pktType {
- case unix.PACKET_OUTGOING:
- conn.Monotonic.SentPackets++
- conn.Monotonic.SentBytes += uint64(payloadLen)
- case unix.PACKET_HOST:
- conn.Monotonic.RecvPackets++
- conn.Monotonic.RecvBytes += uint64(payloadLen)
- }
-
- if tcp.FIN || tcp.RST {
- if !c.closed {
- c.closed = true
- conn.Monotonic.TCPClosed++
- conn.Duration = time.Duration(time.Now().UnixNano() - int64(conn.Duration))
- }
- delete(t.conns, conn.ConnectionTuple)
- return nil
- }
-
- if !tcp.SYN && !c.established {
- c.established = true
- conn.Monotonic.TCPEstablished++
- }
-
- log.TraceFunc(func() string {
- return fmt.Sprintf("ack_seq=%+v", c)
- })
- t.conns[conn.ConnectionTuple] = c
- return nil
-}
diff --git a/pkg/network/tracer/tracer.go b/pkg/network/tracer/tracer.go
index 2ef995137748f..b8f141deb7b79 100644
--- a/pkg/network/tracer/tracer.go
+++ b/pkg/network/tracer/tracer.go
@@ -189,7 +189,7 @@ func newTracer(cfg *config.Config, telemetryComponent telemetryComponent.Compone
if cfg.ProtocolClassificationEnabled || usmconfig.IsUSMSupportedAndEnabled(cfg) {
connectionProtocolMap, err := tr.ebpfTracer.GetMap(probes.ConnectionProtocolMap)
if err == nil {
- tr.connectionProtocolMapCleaner, err = setupConnectionProtocolMapCleaner(connectionProtocolMap)
+ tr.connectionProtocolMapCleaner, err = setupConnectionProtocolMapCleaner(connectionProtocolMap, probes.ConnectionProtocolMap)
if err != nil {
log.Warnf("could not set up connection protocol map cleaner: %s", err)
}
@@ -887,8 +887,8 @@ const connProtoCleaningInterval = 5 * time.Minute
// setupConnectionProtocolMapCleaner sets up a map cleaner for the connectionProtocolMap.
// It will run every connProtoCleaningInterval and delete entries older than connProtoTTL.
-func setupConnectionProtocolMapCleaner(connectionProtocolMap *ebpf.Map) (*ddebpf.MapCleaner[netebpf.ConnTuple, netebpf.ProtocolStackWrapper], error) {
- mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, netebpf.ProtocolStackWrapper](connectionProtocolMap, 1024)
+func setupConnectionProtocolMapCleaner(connectionProtocolMap *ebpf.Map, name string) (*ddebpf.MapCleaner[netebpf.ConnTuple, netebpf.ProtocolStackWrapper], error) {
+ mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, netebpf.ProtocolStackWrapper](connectionProtocolMap, 1024, name, "npm_tracer")
if err != nil {
return nil, err
}
diff --git a/pkg/network/tracer/tracer_linux_test.go b/pkg/network/tracer/tracer_linux_test.go
index eddeeeec174f2..0a4206e3094f5 100644
--- a/pkg/network/tracer/tracer_linux_test.go
+++ b/pkg/network/tracer/tracer_linux_test.go
@@ -126,8 +126,10 @@ func (s *TracerSuite) TestTCPRemoveEntries() {
func (s *TracerSuite) TestTCPRetransmit() {
t := s.T()
+ cfg := testConfig()
+ skipEbpflessTodo(t, cfg)
// Enable BPF-based system probe
- tr := setupTracer(t, testConfig())
+ tr := setupTracer(t, cfg)
// Create TCP Server which sends back serverMessageSize bytes
server := tracertestutil.NewTCPServer(func(c net.Conn) {
@@ -175,6 +177,8 @@ func (s *TracerSuite) TestTCPRetransmit() {
func (s *TracerSuite) TestTCPRetransmitSharedSocket() {
t := s.T()
+ cfg := testConfig()
+ skipEbpflessTodo(t, cfg)
// Create TCP Server that simply "drains" connection until receiving an EOF
server := tracertestutil.NewTCPServer(func(c net.Conn) {
io.Copy(io.Discard, c)
@@ -203,7 +207,7 @@ func (s *TracerSuite) TestTCPRetransmitSharedSocket() {
// this connection (if there are pid collisions,
// we assign the tcp stats to one connection randomly,
// which is the point of this test)
- tr := setupTracer(t, testConfig())
+ tr := setupTracer(t, cfg)
const numProcesses = 10
iptablesWrapper(t, func() {
@@ -252,8 +256,10 @@ func (s *TracerSuite) TestTCPRTT() {
if ebpftest.GetBuildMode() == ebpftest.Prebuilt {
flake.Mark(t)
}
+ cfg := testConfig()
+ skipEbpflessTodo(t, cfg)
// Enable BPF-based system probe
- tr := setupTracer(t, testConfig())
+ tr := setupTracer(t, cfg)
// Create TCP Server that simply "drains" connection until receiving an EOF
server := tracertestutil.NewTCPServer(func(c net.Conn) {
io.Copy(io.Discard, c)
@@ -1101,6 +1107,10 @@ func (s *TracerSuite) TestSelfConnect() {
// Enable BPF-based system probe
cfg := testConfig()
cfg.TCPConnTimeout = 3 * time.Second
+ // TODO filter out connections in ebpfless where the incoming IP:port == outgoing IP:port because
+ // packet capture can't trace it properly
+ skipEbpflessTodo(t, cfg)
+
tr := setupTracer(t, cfg)
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
@@ -2328,6 +2338,7 @@ func BenchmarkAddProcessInfo(b *testing.B) {
func (s *TracerSuite) TestConnectionDuration() {
t := s.T()
cfg := testConfig()
+ skipEbpflessTodo(t, cfg)
tr := setupTracer(t, cfg)
srv := tracertestutil.NewTCPServer(func(c net.Conn) {
diff --git a/pkg/network/tracer/tracer_test.go b/pkg/network/tracer/tracer_test.go
index 51b44ad6ba710..6c09466ef6e48 100644
--- a/pkg/network/tracer/tracer_test.go
+++ b/pkg/network/tracer/tracer_test.go
@@ -256,7 +256,9 @@ func (s *TracerSuite) TestTCPShortLived() {
assert.Equal(t, clientMessageSize, int(m.SentBytes))
assert.Equal(t, serverMessageSize, int(m.RecvBytes))
assert.Equal(t, 0, int(m.Retransmits))
- assert.Equal(t, os.Getpid(), int(conn.Pid))
+ if !tr.config.EnableEbpfless {
+ assert.Equal(t, os.Getpid(), int(conn.Pid))
+ }
assert.Equal(t, addrPort(server.Address()), int(conn.DPort))
assert.Equal(t, network.OUTGOING, conn.Direction)
assert.True(t, conn.IntraHost)
@@ -318,7 +320,9 @@ func (s *TracerSuite) TestTCPOverIPv6() {
assert.Equal(t, clientMessageSize, int(m.SentBytes))
assert.Equal(t, serverMessageSize, int(m.RecvBytes))
assert.Equal(t, 0, int(m.Retransmits))
- assert.Equal(t, os.Getpid(), int(conn.Pid))
+ if !tr.config.EnableEbpfless {
+ assert.Equal(t, os.Getpid(), int(conn.Pid))
+ }
assert.Equal(t, ln.Addr().(*net.TCPAddr).Port, int(conn.DPort))
assert.Equal(t, network.OUTGOING, conn.Direction)
assert.True(t, conn.IntraHost)
@@ -390,14 +394,17 @@ func (s *TracerSuite) TestTCPConnsReported() {
defer c.Close()
<-processedChan
- // Test
- connections := getConnections(t, tr)
- // Server-side
- _, ok := findConnection(c.RemoteAddr(), c.LocalAddr(), connections)
- require.True(t, ok)
- // Client-side
- _, ok = findConnection(c.LocalAddr(), c.RemoteAddr(), connections)
- require.True(t, ok)
+ // for ebpfless, it takes time for the packet capture to arrive, so poll
+ require.Eventually(t, func() bool {
+ // Test
+ connections := getConnections(t, tr)
+ // Server-side
+ _, okForward := findConnection(c.RemoteAddr(), c.LocalAddr(), connections)
+ // Client-side
+ _, okReverse := findConnection(c.LocalAddr(), c.RemoteAddr(), connections)
+ return okForward && okReverse
+ }, 3*time.Second, 100*time.Millisecond, "connection not found")
+
}
func (s *TracerSuite) TestUDPSendAndReceive() {
@@ -1074,8 +1081,14 @@ func (s *TracerSuite) TestTCPEstablished() {
laddr, raddr := c.LocalAddr(), c.RemoteAddr()
c.Write([]byte("hello"))
- connections := getConnections(t, tr)
- conn, ok := findConnection(laddr, raddr, connections)
+ var conn *network.ConnectionStats
+ var ok bool
+
+ // for ebpfless, wait for the packet capture to appear
+ require.Eventually(t, func() bool {
+ conn, ok = findConnection(laddr, raddr, getConnections(t, tr))
+ return ok
+ }, 3*time.Second, 100*time.Millisecond, "couldn't find connection")
require.True(t, ok)
assert.Equal(t, uint16(1), conn.Last.TCPEstablished)
diff --git a/pkg/network/usm/ebpf_gotls_helpers.go b/pkg/network/usm/ebpf_gotls_helpers.go
index 0ed45396b9024..5aa18b83fb2cf 100644
--- a/pkg/network/usm/ebpf_gotls_helpers.go
+++ b/pkg/network/usm/ebpf_gotls_helpers.go
@@ -8,7 +8,6 @@
package usm
import (
- "debug/elf"
"errors"
"fmt"
"os"
@@ -26,6 +25,7 @@ import (
libtelemetry "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry"
"github.com/DataDog/datadog-agent/pkg/network/usm/utils"
"github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
var paramLookupFunctions = map[string]bininspect.ParameterLookupFunction{
@@ -74,7 +74,7 @@ func (p *goTLSBinaryInspector) Inspect(fpath utils.FilePath, requests []uprobes.
}
defer f.Close()
- elfFile, err := elf.NewFile(f)
+ elfFile, err := safeelf.NewFile(f)
if err != nil {
return nil, fmt.Errorf("file %s could not be parsed as an ELF file: %w", path, err)
}
@@ -94,7 +94,7 @@ func (p *goTLSBinaryInspector) Inspect(fpath utils.FilePath, requests []uprobes.
inspectionResult, err := bininspect.InspectNewProcessBinary(elfFile, functionsConfig, p.structFieldsLookupFunctions)
if err != nil {
- if errors.Is(err, elf.ErrNoSymbols) {
+ if errors.Is(err, safeelf.ErrNoSymbols) {
p.binNoSymbolsMetric.Add(1)
}
return nil, fmt.Errorf("error extracting inspection data from %s: %w", path, err)
diff --git a/pkg/network/usm/ebpf_ssl.go b/pkg/network/usm/ebpf_ssl.go
index aed0ffac49216..6873ff4818e93 100644
--- a/pkg/network/usm/ebpf_ssl.go
+++ b/pkg/network/usm/ebpf_ssl.go
@@ -9,7 +9,6 @@ package usm
import (
"bytes"
- "debug/elf"
"fmt"
"io"
"os"
@@ -37,6 +36,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/util/common"
"github.com/DataDog/datadog-agent/pkg/util/kernel"
"github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
ddsync "github.com/DataDog/datadog-agent/pkg/util/sync"
)
@@ -617,7 +617,7 @@ func addHooks(m *manager.Manager, procRoot string, probes []manager.ProbesSelect
uid := getUID(fpath.ID)
- elfFile, err := elf.Open(fpath.HostPath)
+ elfFile, err := safeelf.Open(fpath.HostPath)
if err != nil {
return err
}
@@ -697,7 +697,7 @@ func addHooks(m *manager.Manager, procRoot string, probes []manager.ProbesSelect
continue
}
}
- manager.SanitizeUprobeAddresses(elfFile, []elf.Symbol{sym})
+ manager.SanitizeUprobeAddresses(elfFile.File, []safeelf.Symbol{sym})
offset, err := bininspect.SymbolToOffset(elfFile, sym)
if err != nil {
return err
diff --git a/pkg/network/usm/usm_http2_monitor_test.go b/pkg/network/usm/usm_http2_monitor_test.go
index 1775d937161d8..564bd1adced94 100644
--- a/pkg/network/usm/usm_http2_monitor_test.go
+++ b/pkg/network/usm/usm_http2_monitor_test.go
@@ -1925,11 +1925,11 @@ func dialHTTP2Server(t *testing.T) net.Conn {
// getHTTP2KernelTelemetry returns the HTTP2 kernel telemetry
func getHTTP2KernelTelemetry(monitor *Monitor, isTLS bool) (*usmhttp2.HTTP2Telemetry, error) {
http2Telemetry := &usmhttp2.HTTP2Telemetry{}
- var zero uint32
mapName := usmhttp2.TelemetryMap
+ key := uint32(0)
if isTLS {
- mapName = usmhttp2.TLSTelemetryMap
+ key = uint32(1)
}
mp, _, err := monitor.ebpfProgram.GetMap(mapName)
@@ -1937,7 +1937,7 @@ func getHTTP2KernelTelemetry(monitor *Monitor, isTLS bool) (*usmhttp2.HTTP2Telem
return nil, fmt.Errorf("unable to get %q map: %s", mapName, err)
}
- if err := mp.Lookup(unsafe.Pointer(&zero), unsafe.Pointer(http2Telemetry)); err != nil {
+ if err := mp.Lookup(unsafe.Pointer(&key), unsafe.Pointer(http2Telemetry)); err != nil {
return nil, fmt.Errorf("unable to lookup %q map: %s", mapName, err)
}
return http2Telemetry, nil
diff --git a/pkg/networkpath/traceroute/config/config.go b/pkg/networkpath/traceroute/config/config.go
new file mode 100644
index 0000000000000..f773c2516e6e4
--- /dev/null
+++ b/pkg/networkpath/traceroute/config/config.go
@@ -0,0 +1,36 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+// Package config is the configuration for the traceroute functionality
+package config
+
+import (
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/networkpath/payload"
+)
+
+// Config specifies the configuration of an instance
+// of Traceroute
+type Config struct {
+ // TODO: add common configuration
+ // Destination Hostname
+ DestHostname string
+ // Destination Port number
+ DestPort uint16
+ // Destination service name
+ DestinationService string
+ // Source service name
+ SourceService string
+ // Source container ID
+ SourceContainerID string
+ // Max number of hops to try
+ MaxTTL uint8
+ // TODO: do we want to expose this?
+ Timeout time.Duration
+ // Protocol is the protocol to use
+ // for traceroute, default is UDP
+ Protocol payload.Protocol
+}
diff --git a/pkg/networkpath/traceroute/runner.go b/pkg/networkpath/traceroute/runner/runner.go
similarity index 92%
rename from pkg/networkpath/traceroute/runner.go
rename to pkg/networkpath/traceroute/runner/runner.go
index ef211ef98610c..9087b66b4eb8a 100644
--- a/pkg/networkpath/traceroute/runner.go
+++ b/pkg/networkpath/traceroute/runner/runner.go
@@ -1,9 +1,10 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016-present Datadog, Inc.
+// Copyright 2024-present Datadog, Inc.
-package traceroute
+// Package runner is the functionality for actually performing traceroutes
+package runner
import (
"context"
@@ -15,7 +16,6 @@ import (
"sort"
"time"
- "github.com/DataDog/datadog-agent/pkg/version"
"github.com/Datadog/dublin-traceroute/go/dublintraceroute/probes/probev4"
"github.com/Datadog/dublin-traceroute/go/dublintraceroute/results"
"github.com/vishvananda/netns"
@@ -24,6 +24,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/network"
"github.com/DataDog/datadog-agent/pkg/networkpath/payload"
+ "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config"
"github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/tcp"
"github.com/DataDog/datadog-agent/pkg/process/util"
"github.com/DataDog/datadog-agent/pkg/telemetry"
@@ -32,6 +33,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/util/hostname"
"github.com/DataDog/datadog-agent/pkg/util/kernel"
"github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/version"
)
const (
@@ -45,21 +47,17 @@ const (
DefaultMinTTL = 1
// DefaultDelay defines the default delay
DefaultDelay = 50 //msec
- // DefaultOutputFormat defines the default output format
- DefaultOutputFormat = "json"
tracerouteRunnerModuleName = "traceroute_runner__"
)
// Telemetry
var tracerouteRunnerTelemetry = struct {
- runs *telemetry.StatCounterWrapper
- failedRuns *telemetry.StatCounterWrapper
- reverseDNSTimetouts *telemetry.StatCounterWrapper
+ runs *telemetry.StatCounterWrapper
+ failedRuns *telemetry.StatCounterWrapper
}{
telemetry.NewStatCounterWrapper(tracerouteRunnerModuleName, "runs", []string{}, "Counter measuring the number of traceroutes run"),
telemetry.NewStatCounterWrapper(tracerouteRunnerModuleName, "failed_runs", []string{}, "Counter measuring the number of traceroute run failures"),
- telemetry.NewStatCounterWrapper(tracerouteRunnerModuleName, "reverse_dns_timeouts", []string{}, "Counter measuring the number of traceroute reverse DNS timeouts"),
}
// Runner executes traceroutes
@@ -69,8 +67,8 @@ type Runner struct {
networkID string
}
-// NewRunner initializes a new traceroute runner
-func NewRunner(telemetryComp telemetryComponent.Component) (*Runner, error) {
+// New initializes a new traceroute runner
+func New(telemetryComp telemetryComponent.Component) (*Runner, error) {
var err error
var networkID string
if ec2.IsRunningOn(context.TODO()) {
@@ -100,7 +98,7 @@ func NewRunner(telemetryComp telemetryComponent.Component) (*Runner, error) {
//
// This code is experimental and will be replaced with a more
// complete implementation.
-func (r *Runner) RunTraceroute(ctx context.Context, cfg Config) (payload.NetworkPath, error) {
+func (r *Runner) RunTraceroute(ctx context.Context, cfg config.Config) (payload.NetworkPath, error) {
defer tracerouteRunnerTelemetry.runs.Inc()
dests, err := net.DefaultResolver.LookupIP(ctx, "ip4", cfg.DestHostname)
if err != nil || len(dests) == 0 {
@@ -164,7 +162,7 @@ func (r *Runner) RunTraceroute(ctx context.Context, cfg Config) (payload.Network
return pathResult, nil
}
-func (r *Runner) runUDP(cfg Config, hname string, dest net.IP, maxTTL uint8, timeout time.Duration) (payload.NetworkPath, error) {
+func (r *Runner) runUDP(cfg config.Config, hname string, dest net.IP, maxTTL uint8, timeout time.Duration) (payload.NetworkPath, error) {
destPort, srcPort, useSourcePort := getPorts(cfg.DestPort)
dt := &probev4.UDPv4{
@@ -194,7 +192,7 @@ func (r *Runner) runUDP(cfg Config, hname string, dest net.IP, maxTTL uint8, tim
return pathResult, nil
}
-func (r *Runner) runTCP(cfg Config, hname string, target net.IP, maxTTL uint8, timeout time.Duration) (payload.NetworkPath, error) {
+func (r *Runner) runTCP(cfg config.Config, hname string, target net.IP, maxTTL uint8, timeout time.Duration) (payload.NetworkPath, error) {
destPort := cfg.DestPort
if destPort == 0 {
destPort = 80 // TODO: is this the default we want?
diff --git a/pkg/networkpath/traceroute/runner_test.go b/pkg/networkpath/traceroute/runner/runner_test.go
similarity index 92%
rename from pkg/networkpath/traceroute/runner_test.go
rename to pkg/networkpath/traceroute/runner/runner_test.go
index 86f7b99f9c68a..4b0a512e7a969 100644
--- a/pkg/networkpath/traceroute/runner_test.go
+++ b/pkg/networkpath/traceroute/runner/runner_test.go
@@ -1,9 +1,9 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2023-present Datadog, Inc.
+// Copyright 2024-present Datadog, Inc.
-package traceroute
+package runner
import (
"testing"
diff --git a/pkg/networkpath/traceroute/traceroute.go b/pkg/networkpath/traceroute/traceroute.go
index 5fb757fc88f71..d94ddc6d55152 100644
--- a/pkg/networkpath/traceroute/traceroute.go
+++ b/pkg/networkpath/traceroute/traceroute.go
@@ -1,45 +1,7 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016-present Datadog, Inc.
+// Copyright 2024-present Datadog, Inc.
// Package traceroute adds traceroute functionality to the agent
package traceroute
-
-import (
- "context"
- "time"
-
- "github.com/DataDog/datadog-agent/pkg/networkpath/payload"
-)
-
-type (
- // Config specifies the configuration of an instance
- // of Traceroute
- Config struct {
- // TODO: add common configuration
- // Destination Hostname
- DestHostname string
- // Destination Port number
- DestPort uint16
- // Destination service name
- DestinationService string
- // Source service name
- SourceService string
- // Source container ID
- SourceContainerID string
- // Max number of hops to try
- MaxTTL uint8
- // TODO: do we want to expose this?
- Timeout time.Duration
- // Protocol is the protocol to use
- // for traceroute, default is UDP
- Protocol payload.Protocol
- }
-
- // Traceroute defines an interface for running
- // traceroutes for the Network Path integration
- Traceroute interface {
- Run(context.Context) (payload.NetworkPath, error)
- }
-)
diff --git a/pkg/networkpath/traceroute/traceroute_darwin.go b/pkg/networkpath/traceroute/traceroute_darwin.go
index cf0ef5d3e325e..38be9352f1715 100644
--- a/pkg/networkpath/traceroute/traceroute_darwin.go
+++ b/pkg/networkpath/traceroute/traceroute_darwin.go
@@ -13,6 +13,8 @@ import (
"github.com/DataDog/datadog-agent/comp/core/telemetry"
"github.com/DataDog/datadog-agent/pkg/networkpath/payload"
+ "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config"
+ "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/runner"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
@@ -24,21 +26,21 @@ const (
// running traceroute from an agent running
// on macOS
type MacTraceroute struct {
- cfg Config
- runner *Runner
+ cfg config.Config
+ runner *runner.Runner
}
// New creates a new instance of MacTraceroute
// based on an input configuration
-func New(cfg Config, telemetry telemetry.Component) (*MacTraceroute, error) {
+func New(cfg config.Config, telemetry telemetry.Component) (*MacTraceroute, error) {
log.Debugf("Creating new traceroute with config: %+v", cfg)
- runner, err := NewRunner(telemetry)
+ runner, err := runner.New(telemetry)
if err != nil {
return nil, err
}
// TCP is not supported at the moment due to the
- // way go listensn for TCP in our implementation on BSD systems
+ // way go listens for TCP in our implementation on BSD systems
if cfg.Protocol == payload.ProtocolTCP {
return nil, fmt.Errorf(tcpNotSupportedMsg)
}
diff --git a/pkg/networkpath/traceroute/traceroute_linux.go b/pkg/networkpath/traceroute/traceroute_linux.go
index 547f0fa2ff501..536d360f4f6c3 100644
--- a/pkg/networkpath/traceroute/traceroute_linux.go
+++ b/pkg/networkpath/traceroute/traceroute_linux.go
@@ -14,6 +14,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core/telemetry"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/networkpath/payload"
+ "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config"
"github.com/DataDog/datadog-agent/pkg/process/net"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
@@ -26,12 +27,12 @@ const (
// running traceroute from an agent running
// on Linux
type LinuxTraceroute struct {
- cfg Config
+ cfg config.Config
}
// New creates a new instance of LinuxTraceroute
// based on an input configuration
-func New(cfg Config, _ telemetry.Component) (*LinuxTraceroute, error) {
+func New(cfg config.Config, _ telemetry.Component) (*LinuxTraceroute, error) {
log.Debugf("Creating new traceroute with config: %+v", cfg)
return &LinuxTraceroute{
cfg: cfg,
diff --git a/pkg/networkpath/traceroute/traceroute_windows.go b/pkg/networkpath/traceroute/traceroute_windows.go
index 089f46d216766..8820ea1de8549 100644
--- a/pkg/networkpath/traceroute/traceroute_windows.go
+++ b/pkg/networkpath/traceroute/traceroute_windows.go
@@ -14,6 +14,7 @@ import (
"github.com/DataDog/datadog-agent/comp/core/telemetry"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
"github.com/DataDog/datadog-agent/pkg/networkpath/payload"
+ "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config"
"github.com/DataDog/datadog-agent/pkg/process/net"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
@@ -26,12 +27,12 @@ const (
// running traceroute from an agent running
// on Windows
type WindowsTraceroute struct {
- cfg Config
+ cfg config.Config
}
// New creates a new instance of WindowsTraceroute
// based on an input configuration
-func New(cfg Config, _ telemetry.Component) (*WindowsTraceroute, error) {
+func New(cfg config.Config, _ telemetry.Component) (*WindowsTraceroute, error) {
log.Debugf("Creating new traceroute with config: %+v", cfg)
return &WindowsTraceroute{
cfg: cfg,
diff --git a/pkg/networkpath/traceroute/utils.go b/pkg/networkpath/traceroute/utils.go
index 760c3a03acc38..764a4b48f31a3 100644
--- a/pkg/networkpath/traceroute/utils.go
+++ b/pkg/networkpath/traceroute/utils.go
@@ -11,6 +11,12 @@ import (
"net"
"strings"
"time"
+
+ "github.com/DataDog/datadog-agent/pkg/telemetry"
+)
+
+var (
+ reverseDNSTimeouts = telemetry.NewStatCounterWrapper("traceroute", "reverse_dns_timeouts", []string{}, "Counter measuring the number of traceroute reverse DNS timeouts")
)
var lookupAddrFn = net.DefaultResolver.LookupAddr
@@ -30,7 +36,7 @@ func GetHostname(ipAddr string) string {
defer cancel()
currHostList, err := lookupAddrFn(ctx, ipAddr)
if errors.Is(err, context.Canceled) {
- tracerouteRunnerTelemetry.reverseDNSTimetouts.Inc()
+ reverseDNSTimeouts.Inc()
}
if len(currHostList) > 0 {
diff --git a/pkg/process/checks/process_test.go b/pkg/process/checks/process_test.go
index b54f53b1ff3e2..c1f4be9e64388 100644
--- a/pkg/process/checks/process_test.go
+++ b/pkg/process/checks/process_test.go
@@ -20,7 +20,7 @@ import (
"go.uber.org/fx"
"github.com/DataDog/datadog-agent/comp/core"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock"
@@ -86,7 +86,7 @@ func mockContainerProvider(t *testing.T) proccontainers.ContainerProvider {
workloadmetafxmock.MockModule(workloadmeta.NewParams()),
))
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
// Finally, container provider
filter, err := containers.GetPauseContainerFilter()
diff --git a/pkg/process/util/containers/containers.go b/pkg/process/util/containers/containers.go
index 962037789758b..2beafeced27f5 100644
--- a/pkg/process/util/containers/containers.go
+++ b/pkg/process/util/containers/containers.go
@@ -12,7 +12,7 @@ import (
model "github.com/DataDog/agent-payload/v5/process"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
"github.com/DataDog/datadog-agent/pkg/util/containers"
diff --git a/pkg/process/util/containers/containers_test.go b/pkg/process/util/containers/containers_test.go
index 0de7a55879890..b71440371fd52 100644
--- a/pkg/process/util/containers/containers_test.go
+++ b/pkg/process/util/containers/containers_test.go
@@ -16,7 +16,7 @@ import (
"go.uber.org/fx"
"github.com/DataDog/datadog-agent/comp/core"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock"
@@ -46,7 +46,7 @@ func TestGetContainers(t *testing.T) {
workloadmetafxmock.MockModule(workloadmeta.NewParams()),
))
- fakeTagger := taggerimpl.SetupFakeTagger(t)
+ fakeTagger := taggerMock.SetupFakeTagger(t)
// Finally, container provider
testTime := time.Now()
diff --git a/pkg/proto/datadog/api/v1/api.proto b/pkg/proto/datadog/api/v1/api.proto
index 44512c96dc099..f6c16ff4cc45e 100644
--- a/pkg/proto/datadog/api/v1/api.proto
+++ b/pkg/proto/datadog/api/v1/api.proto
@@ -3,6 +3,7 @@ syntax = "proto3";
package datadog.api.v1;
import "datadog/model/v1/model.proto";
+import "datadog/remoteagent/remoteagent.proto";
import "datadog/remoteconfig/remoteconfig.proto";
import "datadog/workloadmeta/workloadmeta.proto";
import "google/api/annotations.proto";
@@ -144,4 +145,21 @@ service AgentSecure {
body: "*"
};
};
+
+ // Registers a remote agent.
+ rpc RegisterRemoteAgent(datadog.remoteagent.RegisterRemoteAgentRequest) returns (datadog.remoteagent.RegisterRemoteAgentResponse) {
+ option (google.api.http) = {
+ post: "/v1/grpc/remoteagent/register_remote_agent"
+ body: "*"
+ };
+ };
+}
+
+// Service exposed by remote agents to allow querying by the Core Agent.
+service RemoteAgent {
+ // Gets the status details of a remote agent.
+ rpc GetStatusDetails(datadog.remoteagent.GetStatusDetailsRequest) returns (datadog.remoteagent.GetStatusDetailsResponse);
+
+ // Gets all relevant flare files of a remote agent.
+ rpc GetFlareFiles(datadog.remoteagent.GetFlareFilesRequest) returns (datadog.remoteagent.GetFlareFilesResponse);
}
diff --git a/pkg/proto/datadog/remoteagent/remoteagent.proto b/pkg/proto/datadog/remoteagent/remoteagent.proto
new file mode 100644
index 0000000000000..1de4120527308
--- /dev/null
+++ b/pkg/proto/datadog/remoteagent/remoteagent.proto
@@ -0,0 +1,74 @@
+syntax = "proto3";
+
+package datadog.remoteagent;
+
+option go_package = "pkg/proto/pbgo/core"; // golang
+
+message StatusSection {
+  map<string, string> fields = 1;
+}
+
+message RegisterRemoteAgentRequest {
+ // Unique ID of the remote agent.
+ //
+ // SHOULD be semi-human-readable, with a unique component, such as the process name followed by a UUID:
+ // otel-agent-0192de13-3d66-7cbc-9b4f-1b74f7b8a467.
+ string id = 1;
+
+ // Human-friendly display name of the remote agent.
+ //
+ // SHOULD be the common name for the remote agent, such as OpenTelemetry Collector Agent.
+ string display_name = 2;
+
+ // gRPC endpoint address to reach the remote agent at.
+ //
+ // MUST be a valid gRPC endpoint address, such as "localhost:4317"
+ // MUST be exposing the `RemoteAgent` service.
+ // MUST be secured with TLS, and SHOULD present a valid certificate where possible.
+ string api_endpoint = 3;
+
+ // Authentication token to be used when connecting to the remote agent's gRPC endpoint.
+ //
+ // The remote agent's gRPC endpoint MUST check that this authentication token was provided as a bearer token in all
+ // requests made to the endpoint. If the token is not provided, the remote agent SHOULD reject the request.
+ //
+ // SHOULD be a unique string value that is generated randomly before a remote agent registers itself for the first time.
+ string auth_token = 4;
+}
+
+message RegisterRemoteAgentResponse {
+ // Recommended refresh interval for the remote agent.
+ //
+ // This is the interval at which the remote agent should call the RegisterRemoteAgent RPC in order to assert that the
+ // remote agent is live and healthy.
+ //
+ // The remote agent SHOULD refresh its status every `recommended_refresh_interval_secs` seconds.
+ uint32 recommended_refresh_interval_secs = 1;
+}
+
+message GetStatusDetailsRequest {}
+
+message GetStatusDetailsResponse {
+ // Main status detail section.
+ //
+ // Generally reserved for high-level details such as version, uptime, configuration flags, etc.
+ StatusSection main_section = 1;
+
+ // Named status detail sections.
+ //
+ // Generally reserved for specific (sub)component details, such as the status of a specific feature or integration, etc.
+  map<string, StatusSection> named_sections = 2;
+}
+
+message GetFlareFilesRequest {}
+
+message GetFlareFilesResponse {
+ // Set of files to add to the flare.
+ //
+ // The key is the name of the file, and the value is the contents of the file.
+ //
+ // The key SHOULD be an ASCII string with no path separators (`/`), and will be sanitized as necessary to ensure it can be
+ // used as a valid filename. The key SHOULD have a file extension that is applicable to the file contents, such as
+ // `.yaml` for YAML data.
+  map<string, bytes> files = 1;
+}
diff --git a/pkg/proto/pbgo/core/api.pb.go b/pkg/proto/pbgo/core/api.pb.go
index 46229348979b6..87ed4b2b3620e 100644
--- a/pkg/proto/pbgo/core/api.pb.go
+++ b/pkg/proto/pbgo/core/api.pb.go
@@ -32,127 +32,161 @@ var file_datadog_api_v1_api_proto_rawDesc = []byte{
0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x64, 0x61, 0x74, 0x61,
0x64, 0x6f, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x64, 0x61, 0x74, 0x61,
0x64, 0x6f, 0x67, 0x2f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x6f, 0x64,
- 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f,
- 0x67, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72,
- 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x1a, 0x27, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x6c,
- 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64,
- 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0x71, 0x0a, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x68,
- 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x2e,
+ 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f,
+ 0x67, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x72, 0x65,
+ 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x27, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f,
+ 0x67, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61, 0x2f, 0x77,
+ 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0x71, 0x0a, 0x05,
+ 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x68, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x48, 0x6f, 0x73, 0x74,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d,
+ 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f,
+ 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x6e,
+ 0x61, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x15, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0f,
+ 0x12, 0x0d, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x6f, 0x73, 0x74, 0x32,
+ 0xe2, 0x0b, 0x0a, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x65, 0x12,
+ 0x8f, 0x01, 0x0a, 0x14, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
+ 0x45, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x23, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64,
+ 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65,
+ 0x61, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e,
0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31,
- 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x1f, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c,
- 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c,
- 0x79, 0x22, 0x15, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0f, 0x12, 0x0d, 0x2f, 0x76, 0x31, 0x2f, 0x67,
- 0x72, 0x70, 0x63, 0x2f, 0x68, 0x6f, 0x73, 0x74, 0x32, 0xb0, 0x0a, 0x0a, 0x0b, 0x41, 0x67, 0x65,
- 0x6e, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x14, 0x54, 0x61, 0x67,
- 0x67, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65,
- 0x73, 0x12, 0x23, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65,
- 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67,
- 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0x82, 0xd3,
- 0xe4, 0x93, 0x02, 0x24, 0x3a, 0x01, 0x2a, 0x22, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70,
- 0x63, 0x2f, 0x74, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f,
- 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x30, 0x01, 0x12, 0x89, 0x01, 0x0a, 0x11, 0x54,
- 0x61, 0x67, 0x67, 0x65, 0x72, 0x46, 0x65, 0x74, 0x63, 0x68, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79,
- 0x12, 0x24, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c,
- 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67,
- 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x45,
- 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82,
- 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x3a, 0x01, 0x2a, 0x22, 0x1c, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72,
- 0x70, 0x63, 0x2f, 0x74, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f,
- 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x9b, 0x01, 0x0a, 0x17, 0x44, 0x6f, 0x67, 0x73, 0x74,
- 0x61, 0x74, 0x73, 0x64, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x54, 0x72, 0x69, 0x67, 0x67,
- 0x65, 0x72, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64,
+ 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x2a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x3a, 0x01, 0x2a, 0x22, 0x1f,
+ 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2f,
+ 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x30,
+ 0x01, 0x12, 0x89, 0x01, 0x0a, 0x11, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x46, 0x65, 0x74, 0x63,
+ 0x68, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x24, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f,
+ 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68,
+ 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31,
+ 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x3a, 0x01, 0x2a, 0x22,
+ 0x1c, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x61, 0x67, 0x67, 0x65, 0x72,
+ 0x2f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x9b, 0x01,
+ 0x0a, 0x17, 0x44, 0x6f, 0x67, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x43, 0x61, 0x70, 0x74, 0x75,
+ 0x72, 0x65, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61,
+ 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x70,
+ 0x74, 0x75, 0x72, 0x65, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64,
0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x54, 0x72, 0x69,
- 0x67, 0x67, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x64, 0x61,
- 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43,
- 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x3a, 0x01, 0x2a,
- 0x22, 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x64, 0x6f, 0x67, 0x73, 0x74,
- 0x61, 0x74, 0x73, 0x64, 0x2f, 0x63, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x2f, 0x74, 0x72, 0x69,
- 0x67, 0x67, 0x65, 0x72, 0x12, 0x8c, 0x01, 0x0a, 0x17, 0x44, 0x6f, 0x67, 0x73, 0x74, 0x61, 0x74,
- 0x73, 0x64, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65,
- 0x12, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c,
- 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a,
- 0x25, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e,
- 0x76, 0x31, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01,
- 0x2a, 0x22, 0x20, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x64, 0x6f, 0x67, 0x73,
- 0x74, 0x61, 0x74, 0x73, 0x64, 0x2f, 0x63, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x2f, 0x73, 0x74,
- 0x61, 0x74, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x10, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65,
- 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64,
+ 0x67, 0x67, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x82, 0xd3,
+ 0xe4, 0x93, 0x02, 0x27, 0x3a, 0x01, 0x2a, 0x22, 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70,
+ 0x63, 0x2f, 0x64, 0x6f, 0x67, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x2f, 0x63, 0x61, 0x70, 0x74,
+ 0x75, 0x72, 0x65, 0x2f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x8c, 0x01, 0x0a, 0x17,
+ 0x44, 0x6f, 0x67, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x67,
+ 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f,
+ 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65,
+ 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x25, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67,
+ 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x22, 0x20, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72,
+ 0x70, 0x63, 0x2f, 0x64, 0x6f, 0x67, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x2f, 0x63, 0x61, 0x70,
+ 0x74, 0x75, 0x72, 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x10, 0x43,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12,
+ 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64,
0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
- 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x28, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4,
- 0x93, 0x02, 0x22, 0x3a, 0x01, 0x2a, 0x22, 0x1d, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63,
- 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x78, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a,
- 0x26, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a,
- 0x01, 0x2a, 0x22, 0x1b, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x6d,
- 0x6f, 0x74, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12,
- 0x94, 0x01, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x73, 0x48, 0x41, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67,
+ 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x3a, 0x01, 0x2a, 0x22, 0x1d, 0x2f,
+ 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x78, 0x0a, 0x0e,
+ 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22, 0x1b, 0x2f, 0x76, 0x31, 0x2f, 0x67,
+ 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x94, 0x01, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x48, 0x41, 0x12, 0x27, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67,
0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65,
- 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x28, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02,
- 0x25, 0x3a, 0x01, 0x2a, 0x22, 0x20, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72,
- 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x73, 0x5f, 0x68, 0x61, 0x12, 0x7d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x41, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70,
- 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93,
- 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f,
- 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x73, 0x74, 0x61,
- 0x74, 0x65, 0x5f, 0x68, 0x61, 0x12, 0xb3, 0x01, 0x0a, 0x1a, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f,
- 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x74, 0x69,
- 0x74, 0x69, 0x65, 0x73, 0x12, 0x2f, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x77,
- 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b,
- 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e,
- 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72,
- 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x3a,
- 0x01, 0x2a, 0x22, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x77, 0x6f, 0x72,
- 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x30, 0x01, 0x42, 0x15, 0x5a, 0x13, 0x70,
- 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x63, 0x6f,
- 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x22, 0x20, 0x2f, 0x76, 0x31,
+ 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x5f, 0x68, 0x61, 0x12, 0x7d, 0x0a,
+ 0x10, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48,
+ 0x41, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x64, 0x61, 0x74, 0x61,
+ 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74,
+ 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76,
+ 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x12, 0xb3, 0x01, 0x0a,
+ 0x1a, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x2f, 0x2e, 0x64, 0x61,
+ 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65,
+ 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61, 0x53,
+ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x64,
+ 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d,
+ 0x65, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61,
+ 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x3a, 0x01, 0x2a, 0x22, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x67,
+ 0x72, 0x70, 0x63, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61,
+ 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73,
+ 0x30, 0x01, 0x12, 0xaf, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x2f, 0x2e, 0x64, 0x61, 0x74,
+ 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74,
+ 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41,
+ 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x64, 0x61,
+ 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e,
+ 0x74, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x35, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x3a, 0x01, 0x2a, 0x22, 0x2a, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72,
+ 0x70, 0x63, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x72,
+ 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61,
+ 0x67, 0x65, 0x6e, 0x74, 0x32, 0xe6, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41,
+ 0x67, 0x65, 0x6e, 0x74, 0x12, 0x6f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x2c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64,
+ 0x6f, 0x67, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47,
+ 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67,
+ 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x72,
+ 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67,
+ 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74,
+ 0x46, 0x6c, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x2a, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x72, 0x65, 0x6d, 0x6f,
+ 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x72, 0x65,
+ 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x15, 0x5a,
+ 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f,
+ 0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_datadog_api_v1_api_proto_goTypes = []interface{}{
- (*HostnameRequest)(nil), // 0: datadog.model.v1.HostnameRequest
- (*StreamTagsRequest)(nil), // 1: datadog.model.v1.StreamTagsRequest
- (*FetchEntityRequest)(nil), // 2: datadog.model.v1.FetchEntityRequest
- (*CaptureTriggerRequest)(nil), // 3: datadog.model.v1.CaptureTriggerRequest
- (*TaggerState)(nil), // 4: datadog.model.v1.TaggerState
- (*ClientGetConfigsRequest)(nil), // 5: datadog.config.ClientGetConfigsRequest
- (*empty.Empty)(nil), // 6: google.protobuf.Empty
- (*WorkloadmetaStreamRequest)(nil), // 7: datadog.workloadmeta.WorkloadmetaStreamRequest
- (*HostnameReply)(nil), // 8: datadog.model.v1.HostnameReply
- (*StreamTagsResponse)(nil), // 9: datadog.model.v1.StreamTagsResponse
- (*FetchEntityResponse)(nil), // 10: datadog.model.v1.FetchEntityResponse
- (*CaptureTriggerResponse)(nil), // 11: datadog.model.v1.CaptureTriggerResponse
- (*TaggerStateResponse)(nil), // 12: datadog.model.v1.TaggerStateResponse
- (*ClientGetConfigsResponse)(nil), // 13: datadog.config.ClientGetConfigsResponse
- (*GetStateConfigResponse)(nil), // 14: datadog.config.GetStateConfigResponse
- (*WorkloadmetaStreamResponse)(nil), // 15: datadog.workloadmeta.WorkloadmetaStreamResponse
+ (*HostnameRequest)(nil), // 0: datadog.model.v1.HostnameRequest
+ (*StreamTagsRequest)(nil), // 1: datadog.model.v1.StreamTagsRequest
+ (*FetchEntityRequest)(nil), // 2: datadog.model.v1.FetchEntityRequest
+ (*CaptureTriggerRequest)(nil), // 3: datadog.model.v1.CaptureTriggerRequest
+ (*TaggerState)(nil), // 4: datadog.model.v1.TaggerState
+ (*ClientGetConfigsRequest)(nil), // 5: datadog.config.ClientGetConfigsRequest
+ (*empty.Empty)(nil), // 6: google.protobuf.Empty
+ (*WorkloadmetaStreamRequest)(nil), // 7: datadog.workloadmeta.WorkloadmetaStreamRequest
+ (*RegisterRemoteAgentRequest)(nil), // 8: datadog.remoteagent.RegisterRemoteAgentRequest
+ (*GetStatusDetailsRequest)(nil), // 9: datadog.remoteagent.GetStatusDetailsRequest
+ (*GetFlareFilesRequest)(nil), // 10: datadog.remoteagent.GetFlareFilesRequest
+ (*HostnameReply)(nil), // 11: datadog.model.v1.HostnameReply
+ (*StreamTagsResponse)(nil), // 12: datadog.model.v1.StreamTagsResponse
+ (*FetchEntityResponse)(nil), // 13: datadog.model.v1.FetchEntityResponse
+ (*CaptureTriggerResponse)(nil), // 14: datadog.model.v1.CaptureTriggerResponse
+ (*TaggerStateResponse)(nil), // 15: datadog.model.v1.TaggerStateResponse
+ (*ClientGetConfigsResponse)(nil), // 16: datadog.config.ClientGetConfigsResponse
+ (*GetStateConfigResponse)(nil), // 17: datadog.config.GetStateConfigResponse
+ (*WorkloadmetaStreamResponse)(nil), // 18: datadog.workloadmeta.WorkloadmetaStreamResponse
+ (*RegisterRemoteAgentResponse)(nil), // 19: datadog.remoteagent.RegisterRemoteAgentResponse
+ (*GetStatusDetailsResponse)(nil), // 20: datadog.remoteagent.GetStatusDetailsResponse
+ (*GetFlareFilesResponse)(nil), // 21: datadog.remoteagent.GetFlareFilesResponse
}
var file_datadog_api_v1_api_proto_depIdxs = []int32{
0, // 0: datadog.api.v1.Agent.GetHostname:input_type -> datadog.model.v1.HostnameRequest
@@ -165,18 +199,24 @@ var file_datadog_api_v1_api_proto_depIdxs = []int32{
5, // 7: datadog.api.v1.AgentSecure.ClientGetConfigsHA:input_type -> datadog.config.ClientGetConfigsRequest
6, // 8: datadog.api.v1.AgentSecure.GetConfigStateHA:input_type -> google.protobuf.Empty
7, // 9: datadog.api.v1.AgentSecure.WorkloadmetaStreamEntities:input_type -> datadog.workloadmeta.WorkloadmetaStreamRequest
- 8, // 10: datadog.api.v1.Agent.GetHostname:output_type -> datadog.model.v1.HostnameReply
- 9, // 11: datadog.api.v1.AgentSecure.TaggerStreamEntities:output_type -> datadog.model.v1.StreamTagsResponse
- 10, // 12: datadog.api.v1.AgentSecure.TaggerFetchEntity:output_type -> datadog.model.v1.FetchEntityResponse
- 11, // 13: datadog.api.v1.AgentSecure.DogstatsdCaptureTrigger:output_type -> datadog.model.v1.CaptureTriggerResponse
- 12, // 14: datadog.api.v1.AgentSecure.DogstatsdSetTaggerState:output_type -> datadog.model.v1.TaggerStateResponse
- 13, // 15: datadog.api.v1.AgentSecure.ClientGetConfigs:output_type -> datadog.config.ClientGetConfigsResponse
- 14, // 16: datadog.api.v1.AgentSecure.GetConfigState:output_type -> datadog.config.GetStateConfigResponse
- 13, // 17: datadog.api.v1.AgentSecure.ClientGetConfigsHA:output_type -> datadog.config.ClientGetConfigsResponse
- 14, // 18: datadog.api.v1.AgentSecure.GetConfigStateHA:output_type -> datadog.config.GetStateConfigResponse
- 15, // 19: datadog.api.v1.AgentSecure.WorkloadmetaStreamEntities:output_type -> datadog.workloadmeta.WorkloadmetaStreamResponse
- 10, // [10:20] is the sub-list for method output_type
- 0, // [0:10] is the sub-list for method input_type
+ 8, // 10: datadog.api.v1.AgentSecure.RegisterRemoteAgent:input_type -> datadog.remoteagent.RegisterRemoteAgentRequest
+ 9, // 11: datadog.api.v1.RemoteAgent.GetStatusDetails:input_type -> datadog.remoteagent.GetStatusDetailsRequest
+ 10, // 12: datadog.api.v1.RemoteAgent.GetFlareFiles:input_type -> datadog.remoteagent.GetFlareFilesRequest
+ 11, // 13: datadog.api.v1.Agent.GetHostname:output_type -> datadog.model.v1.HostnameReply
+ 12, // 14: datadog.api.v1.AgentSecure.TaggerStreamEntities:output_type -> datadog.model.v1.StreamTagsResponse
+ 13, // 15: datadog.api.v1.AgentSecure.TaggerFetchEntity:output_type -> datadog.model.v1.FetchEntityResponse
+ 14, // 16: datadog.api.v1.AgentSecure.DogstatsdCaptureTrigger:output_type -> datadog.model.v1.CaptureTriggerResponse
+ 15, // 17: datadog.api.v1.AgentSecure.DogstatsdSetTaggerState:output_type -> datadog.model.v1.TaggerStateResponse
+ 16, // 18: datadog.api.v1.AgentSecure.ClientGetConfigs:output_type -> datadog.config.ClientGetConfigsResponse
+ 17, // 19: datadog.api.v1.AgentSecure.GetConfigState:output_type -> datadog.config.GetStateConfigResponse
+ 16, // 20: datadog.api.v1.AgentSecure.ClientGetConfigsHA:output_type -> datadog.config.ClientGetConfigsResponse
+ 17, // 21: datadog.api.v1.AgentSecure.GetConfigStateHA:output_type -> datadog.config.GetStateConfigResponse
+ 18, // 22: datadog.api.v1.AgentSecure.WorkloadmetaStreamEntities:output_type -> datadog.workloadmeta.WorkloadmetaStreamResponse
+ 19, // 23: datadog.api.v1.AgentSecure.RegisterRemoteAgent:output_type -> datadog.remoteagent.RegisterRemoteAgentResponse
+ 20, // 24: datadog.api.v1.RemoteAgent.GetStatusDetails:output_type -> datadog.remoteagent.GetStatusDetailsResponse
+ 21, // 25: datadog.api.v1.RemoteAgent.GetFlareFiles:output_type -> datadog.remoteagent.GetFlareFilesResponse
+ 13, // [13:26] is the sub-list for method output_type
+ 0, // [0:13] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
@@ -188,6 +228,7 @@ func file_datadog_api_v1_api_proto_init() {
return
}
file_datadog_model_v1_model_proto_init()
+ file_datadog_remoteagent_remoteagent_proto_init()
file_datadog_remoteconfig_remoteconfig_proto_init()
file_datadog_workloadmeta_workloadmeta_proto_init()
type x struct{}
@@ -198,7 +239,7 @@ func file_datadog_api_v1_api_proto_init() {
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
- NumServices: 2,
+ NumServices: 3,
},
GoTypes: file_datadog_api_v1_api_proto_goTypes,
DependencyIndexes: file_datadog_api_v1_api_proto_depIdxs,
@@ -373,6 +414,8 @@ type AgentSecureClient interface {
// --data '{"filter":{"source":3}}' \
// https://localhost:5001/v1/grpc/workloadmeta/stream_entities
WorkloadmetaStreamEntities(ctx context.Context, in *WorkloadmetaStreamRequest, opts ...grpc.CallOption) (AgentSecure_WorkloadmetaStreamEntitiesClient, error)
+ // Registers a remote agent.
+ RegisterRemoteAgent(ctx context.Context, in *RegisterRemoteAgentRequest, opts ...grpc.CallOption) (*RegisterRemoteAgentResponse, error)
}
type agentSecureClient struct {
@@ -510,6 +553,15 @@ func (x *agentSecureWorkloadmetaStreamEntitiesClient) Recv() (*WorkloadmetaStrea
return m, nil
}
+func (c *agentSecureClient) RegisterRemoteAgent(ctx context.Context, in *RegisterRemoteAgentRequest, opts ...grpc.CallOption) (*RegisterRemoteAgentResponse, error) {
+ out := new(RegisterRemoteAgentResponse)
+ err := c.cc.Invoke(ctx, "/datadog.api.v1.AgentSecure/RegisterRemoteAgent", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// AgentSecureServer is the server API for AgentSecure service.
type AgentSecureServer interface {
// subscribes to added, removed, or changed entities in the Tagger
@@ -590,6 +642,8 @@ type AgentSecureServer interface {
// --data '{"filter":{"source":3}}' \
// https://localhost:5001/v1/grpc/workloadmeta/stream_entities
WorkloadmetaStreamEntities(*WorkloadmetaStreamRequest, AgentSecure_WorkloadmetaStreamEntitiesServer) error
+ // Registers a remote agent.
+ RegisterRemoteAgent(context.Context, *RegisterRemoteAgentRequest) (*RegisterRemoteAgentResponse, error)
}
// UnimplementedAgentSecureServer can be embedded to have forward compatible implementations.
@@ -623,6 +677,9 @@ func (*UnimplementedAgentSecureServer) GetConfigStateHA(context.Context, *empty.
func (*UnimplementedAgentSecureServer) WorkloadmetaStreamEntities(*WorkloadmetaStreamRequest, AgentSecure_WorkloadmetaStreamEntitiesServer) error {
return status.Errorf(codes.Unimplemented, "method WorkloadmetaStreamEntities not implemented")
}
+func (*UnimplementedAgentSecureServer) RegisterRemoteAgent(context.Context, *RegisterRemoteAgentRequest) (*RegisterRemoteAgentResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RegisterRemoteAgent not implemented")
+}
func RegisterAgentSecureServer(s *grpc.Server, srv AgentSecureServer) {
s.RegisterService(&_AgentSecure_serviceDesc, srv)
@@ -796,6 +853,24 @@ func (x *agentSecureWorkloadmetaStreamEntitiesServer) Send(m *WorkloadmetaStream
return x.ServerStream.SendMsg(m)
}
+func _AgentSecure_RegisterRemoteAgent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RegisterRemoteAgentRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AgentSecureServer).RegisterRemoteAgent(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/datadog.api.v1.AgentSecure/RegisterRemoteAgent",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AgentSecureServer).RegisterRemoteAgent(ctx, req.(*RegisterRemoteAgentRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _AgentSecure_serviceDesc = grpc.ServiceDesc{
ServiceName: "datadog.api.v1.AgentSecure",
HandlerType: (*AgentSecureServer)(nil),
@@ -828,6 +903,10 @@ var _AgentSecure_serviceDesc = grpc.ServiceDesc{
MethodName: "GetConfigStateHA",
Handler: _AgentSecure_GetConfigStateHA_Handler,
},
+ {
+ MethodName: "RegisterRemoteAgent",
+ Handler: _AgentSecure_RegisterRemoteAgent_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
@@ -843,3 +922,115 @@ var _AgentSecure_serviceDesc = grpc.ServiceDesc{
},
Metadata: "datadog/api/v1/api.proto",
}
+
+// RemoteAgentClient is the client API for RemoteAgent service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type RemoteAgentClient interface {
+ // Gets the status details of a remote agent.
+ GetStatusDetails(ctx context.Context, in *GetStatusDetailsRequest, opts ...grpc.CallOption) (*GetStatusDetailsResponse, error)
+ // Gets all relevant flare files of a remote agent.
+ GetFlareFiles(ctx context.Context, in *GetFlareFilesRequest, opts ...grpc.CallOption) (*GetFlareFilesResponse, error)
+}
+
+type remoteAgentClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewRemoteAgentClient(cc grpc.ClientConnInterface) RemoteAgentClient {
+ return &remoteAgentClient{cc}
+}
+
+func (c *remoteAgentClient) GetStatusDetails(ctx context.Context, in *GetStatusDetailsRequest, opts ...grpc.CallOption) (*GetStatusDetailsResponse, error) {
+ out := new(GetStatusDetailsResponse)
+ err := c.cc.Invoke(ctx, "/datadog.api.v1.RemoteAgent/GetStatusDetails", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *remoteAgentClient) GetFlareFiles(ctx context.Context, in *GetFlareFilesRequest, opts ...grpc.CallOption) (*GetFlareFilesResponse, error) {
+ out := new(GetFlareFilesResponse)
+ err := c.cc.Invoke(ctx, "/datadog.api.v1.RemoteAgent/GetFlareFiles", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// RemoteAgentServer is the server API for RemoteAgent service.
+type RemoteAgentServer interface {
+ // Gets the status details of a remote agent.
+ GetStatusDetails(context.Context, *GetStatusDetailsRequest) (*GetStatusDetailsResponse, error)
+ // Gets all relevant flare files of a remote agent.
+ GetFlareFiles(context.Context, *GetFlareFilesRequest) (*GetFlareFilesResponse, error)
+}
+
+// UnimplementedRemoteAgentServer can be embedded to have forward compatible implementations.
+type UnimplementedRemoteAgentServer struct {
+}
+
+func (*UnimplementedRemoteAgentServer) GetStatusDetails(context.Context, *GetStatusDetailsRequest) (*GetStatusDetailsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetStatusDetails not implemented")
+}
+func (*UnimplementedRemoteAgentServer) GetFlareFiles(context.Context, *GetFlareFilesRequest) (*GetFlareFilesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetFlareFiles not implemented")
+}
+
+func RegisterRemoteAgentServer(s *grpc.Server, srv RemoteAgentServer) {
+ s.RegisterService(&_RemoteAgent_serviceDesc, srv)
+}
+
+func _RemoteAgent_GetStatusDetails_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetStatusDetailsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(RemoteAgentServer).GetStatusDetails(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/datadog.api.v1.RemoteAgent/GetStatusDetails",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(RemoteAgentServer).GetStatusDetails(ctx, req.(*GetStatusDetailsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _RemoteAgent_GetFlareFiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetFlareFilesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(RemoteAgentServer).GetFlareFiles(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/datadog.api.v1.RemoteAgent/GetFlareFiles",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(RemoteAgentServer).GetFlareFiles(ctx, req.(*GetFlareFilesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _RemoteAgent_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "datadog.api.v1.RemoteAgent",
+ HandlerType: (*RemoteAgentServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "GetStatusDetails",
+ Handler: _RemoteAgent_GetStatusDetails_Handler,
+ },
+ {
+ MethodName: "GetFlareFiles",
+ Handler: _RemoteAgent_GetFlareFiles_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "datadog/api/v1/api.proto",
+}
diff --git a/pkg/proto/pbgo/core/api.pb.gw.go b/pkg/proto/pbgo/core/api.pb.gw.go
index a1db6c3c8fe39..fb843e96988c3 100644
--- a/pkg/proto/pbgo/core/api.pb.gw.go
+++ b/pkg/proto/pbgo/core/api.pb.gw.go
@@ -338,6 +338,40 @@ func request_AgentSecure_WorkloadmetaStreamEntities_0(ctx context.Context, marsh
}
+func request_AgentSecure_RegisterRemoteAgent_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq RegisterRemoteAgentRequest
+ var metadata runtime.ServerMetadata
+
+ newReader, berr := utilities.IOReaderFactory(req.Body)
+ if berr != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+ }
+ if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.RegisterRemoteAgent(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_AgentSecure_RegisterRemoteAgent_0(ctx context.Context, marshaler runtime.Marshaler, server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq RegisterRemoteAgentRequest
+ var metadata runtime.ServerMetadata
+
+ newReader, berr := utilities.IOReaderFactory(req.Body)
+ if berr != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+ }
+ if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.RegisterRemoteAgent(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
// RegisterAgentHandlerServer registers the http handlers for service Agent to "mux".
// UnaryRPC :call AgentServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@@ -525,6 +559,26 @@ func RegisterAgentSecureHandlerServer(ctx context.Context, mux *runtime.ServeMux
return
})
+ mux.Handle("POST", pattern_AgentSecure_RegisterRemoteAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_AgentSecure_RegisterRemoteAgent_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_AgentSecure_RegisterRemoteAgent_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
return nil
}
@@ -815,6 +869,26 @@ func RegisterAgentSecureHandlerClient(ctx context.Context, mux *runtime.ServeMux
})
+ mux.Handle("POST", pattern_AgentSecure_RegisterRemoteAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_AgentSecure_RegisterRemoteAgent_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_AgentSecure_RegisterRemoteAgent_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
return nil
}
@@ -836,6 +910,8 @@ var (
pattern_AgentSecure_GetConfigStateHA_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "remoteconfig", "state_ha"}, "", runtime.AssumeColonVerbOpt(true)))
pattern_AgentSecure_WorkloadmetaStreamEntities_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "workloadmeta", "stream_entities"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_AgentSecure_RegisterRemoteAgent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "remoteagent", "register_remote_agent"}, "", runtime.AssumeColonVerbOpt(true)))
)
var (
@@ -856,4 +932,6 @@ var (
forward_AgentSecure_GetConfigStateHA_0 = runtime.ForwardResponseMessage
forward_AgentSecure_WorkloadmetaStreamEntities_0 = runtime.ForwardResponseStream
+
+ forward_AgentSecure_RegisterRemoteAgent_0 = runtime.ForwardResponseMessage
)
diff --git a/pkg/proto/pbgo/core/remoteagent.pb.go b/pkg/proto/pbgo/core/remoteagent.pb.go
new file mode 100644
index 0000000000000..0893ee22f3ec9
--- /dev/null
+++ b/pkg/proto/pbgo/core/remoteagent.pb.go
@@ -0,0 +1,619 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.34.0
+// protoc v5.26.1
+// source: datadog/remoteagent/remoteagent.proto
+
+package core
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type StatusSection struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Fields map[string]string `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *StatusSection) Reset() {
+ *x = StatusSection{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatusSection) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusSection) ProtoMessage() {}
+
+func (x *StatusSection) ProtoReflect() protoreflect.Message {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusSection.ProtoReflect.Descriptor instead.
+func (*StatusSection) Descriptor() ([]byte, []int) {
+ return file_datadog_remoteagent_remoteagent_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *StatusSection) GetFields() map[string]string {
+ if x != nil {
+ return x.Fields
+ }
+ return nil
+}
+
+type RegisterRemoteAgentRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Unique ID of the remote agent.
+ //
+ // SHOULD be semi-human-readable, with a unique component, such as the process name followed by a UUID:
+ // otel-agent-0192de13-3d66-7cbc-9b4f-1b74f7b8a467.
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Human-friendly display name of the remote agent.
+ //
+ // SHOULD be the common name for the remote agent, such as OpenTelemetry Collector Agent.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // gRPC endpoint address to reach the remote agent at.
+ //
+ // MUST be a valid gRPC endpoint address, such as "localhost:4317"
+ // MUST be exposing the `RemoteAgent` service.
+ // MUST be secured with TLS, and SHOULD present a valid certificate where possible.
+ ApiEndpoint string `protobuf:"bytes,3,opt,name=api_endpoint,json=apiEndpoint,proto3" json:"api_endpoint,omitempty"`
+ // Authentication token to be used when connecting to the remote agent's gRPC endpoint.
+ //
+ // The remote agent's gRPC endpoint MUST check that this authentication token was provided as a bearer token in all
+ // requests made to the endpoint. If the token is not provided, the remote agent SHOULD reject the request.
+ //
+ // SHOULD be a unique string value that is generated randomly before a remote agent registers itself for the first time.
+ AuthToken string `protobuf:"bytes,4,opt,name=auth_token,json=authToken,proto3" json:"auth_token,omitempty"`
+}
+
+func (x *RegisterRemoteAgentRequest) Reset() {
+ *x = RegisterRemoteAgentRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegisterRemoteAgentRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegisterRemoteAgentRequest) ProtoMessage() {}
+
+func (x *RegisterRemoteAgentRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegisterRemoteAgentRequest.ProtoReflect.Descriptor instead.
+func (*RegisterRemoteAgentRequest) Descriptor() ([]byte, []int) {
+ return file_datadog_remoteagent_remoteagent_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *RegisterRemoteAgentRequest) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *RegisterRemoteAgentRequest) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *RegisterRemoteAgentRequest) GetApiEndpoint() string {
+ if x != nil {
+ return x.ApiEndpoint
+ }
+ return ""
+}
+
+func (x *RegisterRemoteAgentRequest) GetAuthToken() string {
+ if x != nil {
+ return x.AuthToken
+ }
+ return ""
+}
+
+type RegisterRemoteAgentResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Recommended refresh interval for the remote agent.
+ //
+ // This is the interval at which the remote agent should call the RegisterRemoteAgent RPC in order to assert that the
+ // remote agent is live and healthy.
+ //
+ // The remote agent SHOULD refresh its status every `recommended_refresh_interval_secs` seconds.
+ RecommendedRefreshIntervalSecs uint32 `protobuf:"varint,1,opt,name=recommended_refresh_interval_secs,json=recommendedRefreshIntervalSecs,proto3" json:"recommended_refresh_interval_secs,omitempty"`
+}
+
+func (x *RegisterRemoteAgentResponse) Reset() {
+ *x = RegisterRemoteAgentResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegisterRemoteAgentResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegisterRemoteAgentResponse) ProtoMessage() {}
+
+func (x *RegisterRemoteAgentResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegisterRemoteAgentResponse.ProtoReflect.Descriptor instead.
+func (*RegisterRemoteAgentResponse) Descriptor() ([]byte, []int) {
+ return file_datadog_remoteagent_remoteagent_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *RegisterRemoteAgentResponse) GetRecommendedRefreshIntervalSecs() uint32 {
+ if x != nil {
+ return x.RecommendedRefreshIntervalSecs
+ }
+ return 0
+}
+
+type GetStatusDetailsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *GetStatusDetailsRequest) Reset() {
+ *x = GetStatusDetailsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetStatusDetailsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetStatusDetailsRequest) ProtoMessage() {}
+
+func (x *GetStatusDetailsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetStatusDetailsRequest.ProtoReflect.Descriptor instead.
+func (*GetStatusDetailsRequest) Descriptor() ([]byte, []int) {
+ return file_datadog_remoteagent_remoteagent_proto_rawDescGZIP(), []int{3}
+}
+
+type GetStatusDetailsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Main status detail section.
+ //
+ // Generally reserved for high-level details such as version, uptime, configuration flags, etc.
+ MainSection *StatusSection `protobuf:"bytes,1,opt,name=main_section,json=mainSection,proto3" json:"main_section,omitempty"`
+ // Named status detail sections.
+ //
+ // Generally reserved for specific (sub)component details, such as the status of a specific feature or integration, etc.
+ NamedSections map[string]*StatusSection `protobuf:"bytes,2,rep,name=named_sections,json=namedSections,proto3" json:"named_sections,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *GetStatusDetailsResponse) Reset() {
+ *x = GetStatusDetailsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetStatusDetailsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetStatusDetailsResponse) ProtoMessage() {}
+
+func (x *GetStatusDetailsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetStatusDetailsResponse.ProtoReflect.Descriptor instead.
+func (*GetStatusDetailsResponse) Descriptor() ([]byte, []int) {
+ return file_datadog_remoteagent_remoteagent_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *GetStatusDetailsResponse) GetMainSection() *StatusSection {
+ if x != nil {
+ return x.MainSection
+ }
+ return nil
+}
+
+func (x *GetStatusDetailsResponse) GetNamedSections() map[string]*StatusSection {
+ if x != nil {
+ return x.NamedSections
+ }
+ return nil
+}
+
+type GetFlareFilesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *GetFlareFilesRequest) Reset() {
+ *x = GetFlareFilesRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetFlareFilesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetFlareFilesRequest) ProtoMessage() {}
+
+func (x *GetFlareFilesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetFlareFilesRequest.ProtoReflect.Descriptor instead.
+func (*GetFlareFilesRequest) Descriptor() ([]byte, []int) {
+ return file_datadog_remoteagent_remoteagent_proto_rawDescGZIP(), []int{5}
+}
+
+type GetFlareFilesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Set of files to add to the flare.
+ //
+ // The key is the name of the file, and the value is the contents of the file.
+ //
+ // The key SHOULD be an ASCII string with no path separators (`/`), and will be sanitized as necessary to ensure it can be
+ // used as a valid filename. The key SHOULD have a file extension that is applicable to the file contents, such as
+ // `.yaml` for YAML data.
+ Files map[string][]byte `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *GetFlareFilesResponse) Reset() {
+ *x = GetFlareFilesResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetFlareFilesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetFlareFilesResponse) ProtoMessage() {}
+
+func (x *GetFlareFilesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_datadog_remoteagent_remoteagent_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetFlareFilesResponse.ProtoReflect.Descriptor instead.
+func (*GetFlareFilesResponse) Descriptor() ([]byte, []int) {
+ return file_datadog_remoteagent_remoteagent_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *GetFlareFilesResponse) GetFiles() map[string][]byte {
+ if x != nil {
+ return x.Files
+ }
+ return nil
+}
+
+var File_datadog_remoteagent_remoteagent_proto protoreflect.FileDescriptor
+
+var file_datadog_remoteagent_remoteagent_proto_rawDesc = []byte{
+ 0x0a, 0x25, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67,
+ 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x22, 0x92, 0x01, 0x0a,
+ 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46,
+ 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e,
+ 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61,
+ 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x91, 0x01, 0x0a, 0x1a, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65,
+ 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64,
+ 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e,
+ 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x70, 0x69, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f,
+ 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x70, 0x69, 0x45, 0x6e,
+ 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68,
+ 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x68, 0x0a, 0x1b, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
+ 0x72, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x21, 0x72, 0x65, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e,
+ 0x64, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x1e, 0x72, 0x65, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x52, 0x65, 0x66, 0x72,
+ 0x65, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x73, 0x22,
+ 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb0, 0x02, 0x0a, 0x18, 0x47,
+ 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0c, 0x6d, 0x61, 0x69, 0x6e, 0x5f,
+ 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67,
+ 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x0b, 0x6d, 0x61, 0x69, 0x6e, 0x53, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x67,
+ 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67,
+ 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x64, 0x0a, 0x12, 0x4e, 0x61, 0x6d, 0x65, 0x64,
+ 0x53, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
+ 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61,
+ 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x16, 0x0a,
+ 0x14, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61,
+ 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x4b, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35,
+ 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61,
+ 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c,
+ 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x1a, 0x38, 0x0a, 0x0a,
+ 0x46, 0x69, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x15, 0x5a, 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_datadog_remoteagent_remoteagent_proto_rawDescOnce sync.Once
+ file_datadog_remoteagent_remoteagent_proto_rawDescData = file_datadog_remoteagent_remoteagent_proto_rawDesc
+)
+
+func file_datadog_remoteagent_remoteagent_proto_rawDescGZIP() []byte {
+ file_datadog_remoteagent_remoteagent_proto_rawDescOnce.Do(func() {
+ file_datadog_remoteagent_remoteagent_proto_rawDescData = protoimpl.X.CompressGZIP(file_datadog_remoteagent_remoteagent_proto_rawDescData)
+ })
+ return file_datadog_remoteagent_remoteagent_proto_rawDescData
+}
+
+var file_datadog_remoteagent_remoteagent_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
+var file_datadog_remoteagent_remoteagent_proto_goTypes = []interface{}{
+ (*StatusSection)(nil), // 0: datadog.remoteagent.StatusSection
+ (*RegisterRemoteAgentRequest)(nil), // 1: datadog.remoteagent.RegisterRemoteAgentRequest
+ (*RegisterRemoteAgentResponse)(nil), // 2: datadog.remoteagent.RegisterRemoteAgentResponse
+ (*GetStatusDetailsRequest)(nil), // 3: datadog.remoteagent.GetStatusDetailsRequest
+ (*GetStatusDetailsResponse)(nil), // 4: datadog.remoteagent.GetStatusDetailsResponse
+ (*GetFlareFilesRequest)(nil), // 5: datadog.remoteagent.GetFlareFilesRequest
+ (*GetFlareFilesResponse)(nil), // 6: datadog.remoteagent.GetFlareFilesResponse
+ nil, // 7: datadog.remoteagent.StatusSection.FieldsEntry
+ nil, // 8: datadog.remoteagent.GetStatusDetailsResponse.NamedSectionsEntry
+ nil, // 9: datadog.remoteagent.GetFlareFilesResponse.FilesEntry
+}
+var file_datadog_remoteagent_remoteagent_proto_depIdxs = []int32{
+ 7, // 0: datadog.remoteagent.StatusSection.fields:type_name -> datadog.remoteagent.StatusSection.FieldsEntry
+ 0, // 1: datadog.remoteagent.GetStatusDetailsResponse.main_section:type_name -> datadog.remoteagent.StatusSection
+ 8, // 2: datadog.remoteagent.GetStatusDetailsResponse.named_sections:type_name -> datadog.remoteagent.GetStatusDetailsResponse.NamedSectionsEntry
+ 9, // 3: datadog.remoteagent.GetFlareFilesResponse.files:type_name -> datadog.remoteagent.GetFlareFilesResponse.FilesEntry
+ 0, // 4: datadog.remoteagent.GetStatusDetailsResponse.NamedSectionsEntry.value:type_name -> datadog.remoteagent.StatusSection
+ 5, // [5:5] is the sub-list for method output_type
+ 5, // [5:5] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_datadog_remoteagent_remoteagent_proto_init() }
+func file_datadog_remoteagent_remoteagent_proto_init() {
+ if File_datadog_remoteagent_remoteagent_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_datadog_remoteagent_remoteagent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatusSection); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_datadog_remoteagent_remoteagent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegisterRemoteAgentRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_datadog_remoteagent_remoteagent_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegisterRemoteAgentResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_datadog_remoteagent_remoteagent_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetStatusDetailsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_datadog_remoteagent_remoteagent_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetStatusDetailsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_datadog_remoteagent_remoteagent_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetFlareFilesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_datadog_remoteagent_remoteagent_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetFlareFilesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_datadog_remoteagent_remoteagent_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 10,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_datadog_remoteagent_remoteagent_proto_goTypes,
+ DependencyIndexes: file_datadog_remoteagent_remoteagent_proto_depIdxs,
+ MessageInfos: file_datadog_remoteagent_remoteagent_proto_msgTypes,
+ }.Build()
+ File_datadog_remoteagent_remoteagent_proto = out.File
+ file_datadog_remoteagent_remoteagent_proto_rawDesc = nil
+ file_datadog_remoteagent_remoteagent_proto_goTypes = nil
+ file_datadog_remoteagent_remoteagent_proto_depIdxs = nil
+}
diff --git a/pkg/proto/pbgo/core/remoteconfig.pb.go b/pkg/proto/pbgo/core/remoteconfig.pb.go
index c5910deb8fe1f..e81826f5763ca 100644
--- a/pkg/proto/pbgo/core/remoteconfig.pb.go
+++ b/pkg/proto/pbgo/core/remoteconfig.pb.go
@@ -2344,7 +2344,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP() []byte {
var file_datadog_remoteconfig_remoteconfig_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_datadog_remoteconfig_remoteconfig_proto_msgTypes = make([]protoimpl.MessageInfo, 29)
-var file_datadog_remoteconfig_remoteconfig_proto_goTypes = []any{
+var file_datadog_remoteconfig_remoteconfig_proto_goTypes = []interface{}{
(TaskState)(0), // 0: datadog.config.TaskState
(*ConfigMetas)(nil), // 1: datadog.config.ConfigMetas
(*DirectorMetas)(nil), // 2: datadog.config.DirectorMetas
@@ -2423,7 +2423,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[0].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ConfigMetas); i {
case 0:
return &v.state
@@ -2435,7 +2435,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[1].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DirectorMetas); i {
case 0:
return &v.state
@@ -2447,7 +2447,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[2].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DelegatedMeta); i {
case 0:
return &v.state
@@ -2459,7 +2459,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[3].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TopMeta); i {
case 0:
return &v.state
@@ -2471,7 +2471,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[4].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*File); i {
case 0:
return &v.state
@@ -2483,7 +2483,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[5].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*LatestConfigsRequest); i {
case 0:
return &v.state
@@ -2495,7 +2495,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[6].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*LatestConfigsResponse); i {
case 0:
return &v.state
@@ -2507,7 +2507,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[7].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*OrgDataResponse); i {
case 0:
return &v.state
@@ -2519,7 +2519,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[8].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*OrgStatusResponse); i {
case 0:
return &v.state
@@ -2531,7 +2531,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[9].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Client); i {
case 0:
return &v.state
@@ -2543,7 +2543,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[10].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ClientTracer); i {
case 0:
return &v.state
@@ -2555,7 +2555,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[11].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ClientAgent); i {
case 0:
return &v.state
@@ -2567,7 +2567,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[12].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ClientUpdater); i {
case 0:
return &v.state
@@ -2579,7 +2579,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[13].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PackageState); i {
case 0:
return &v.state
@@ -2591,7 +2591,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[14].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PackageStateTask); i {
case 0:
return &v.state
@@ -2603,7 +2603,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[15].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TaskError); i {
case 0:
return &v.state
@@ -2615,7 +2615,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[16].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ConfigState); i {
case 0:
return &v.state
@@ -2627,7 +2627,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[17].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ClientState); i {
case 0:
return &v.state
@@ -2639,7 +2639,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[18].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TargetFileHash); i {
case 0:
return &v.state
@@ -2651,7 +2651,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[19].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TargetFileMeta); i {
case 0:
return &v.state
@@ -2663,7 +2663,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[20].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ClientGetConfigsRequest); i {
case 0:
return &v.state
@@ -2675,7 +2675,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[21].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ClientGetConfigsResponse); i {
case 0:
return &v.state
@@ -2687,7 +2687,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[22].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FileMetaState); i {
case 0:
return &v.state
@@ -2699,7 +2699,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[23].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetStateConfigResponse); i {
case 0:
return &v.state
@@ -2711,7 +2711,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[24].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TracerPredicateV1); i {
case 0:
return &v.state
@@ -2723,7 +2723,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() {
return nil
}
}
- file_datadog_remoteconfig_remoteconfig_proto_msgTypes[25].Exporter = func(v any, i int) any {
+ file_datadog_remoteconfig_remoteconfig_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TracerPredicates); i {
case 0:
return &v.state
diff --git a/pkg/proto/pbgo/mocks/core/api_mockgen.pb.go b/pkg/proto/pbgo/mocks/core/api_mockgen.pb.go
index b3ecb4fc4529a..f63731fc7bb98 100644
--- a/pkg/proto/pbgo/mocks/core/api_mockgen.pb.go
+++ b/pkg/proto/pbgo/mocks/core/api_mockgen.pb.go
@@ -239,6 +239,26 @@ func (mr *MockAgentSecureClientMockRecorder) GetConfigStateHA(ctx, in interface{
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfigStateHA", reflect.TypeOf((*MockAgentSecureClient)(nil).GetConfigStateHA), varargs...)
}
+// RegisterRemoteAgent mocks base method.
+func (m *MockAgentSecureClient) RegisterRemoteAgent(ctx context.Context, in *core.RegisterRemoteAgentRequest, opts ...grpc.CallOption) (*core.RegisterRemoteAgentResponse, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{ctx, in}
+ for _, a := range opts {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "RegisterRemoteAgent", varargs...)
+ ret0, _ := ret[0].(*core.RegisterRemoteAgentResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RegisterRemoteAgent indicates an expected call of RegisterRemoteAgent.
+func (mr *MockAgentSecureClientMockRecorder) RegisterRemoteAgent(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{ctx, in}, opts...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRemoteAgent", reflect.TypeOf((*MockAgentSecureClient)(nil).RegisterRemoteAgent), varargs...)
+}
+
// TaggerFetchEntity mocks base method.
func (m *MockAgentSecureClient) TaggerFetchEntity(ctx context.Context, in *core.FetchEntityRequest, opts ...grpc.CallOption) (*core.FetchEntityResponse, error) {
m.ctrl.T.Helper()
@@ -658,6 +678,21 @@ func (mr *MockAgentSecureServerMockRecorder) GetConfigStateHA(arg0, arg1 interfa
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfigStateHA", reflect.TypeOf((*MockAgentSecureServer)(nil).GetConfigStateHA), arg0, arg1)
}
+// RegisterRemoteAgent mocks base method.
+func (m *MockAgentSecureServer) RegisterRemoteAgent(arg0 context.Context, arg1 *core.RegisterRemoteAgentRequest) (*core.RegisterRemoteAgentResponse, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RegisterRemoteAgent", arg0, arg1)
+ ret0, _ := ret[0].(*core.RegisterRemoteAgentResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RegisterRemoteAgent indicates an expected call of RegisterRemoteAgent.
+func (mr *MockAgentSecureServerMockRecorder) RegisterRemoteAgent(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRemoteAgent", reflect.TypeOf((*MockAgentSecureServer)(nil).RegisterRemoteAgent), arg0, arg1)
+}
+
// TaggerFetchEntity mocks base method.
func (m *MockAgentSecureServer) TaggerFetchEntity(arg0 context.Context, arg1 *core.FetchEntityRequest) (*core.FetchEntityResponse, error) {
m.ctrl.T.Helper()
@@ -938,3 +973,119 @@ func (mr *MockAgentSecure_WorkloadmetaStreamEntitiesServerMockRecorder) SetTrail
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockAgentSecure_WorkloadmetaStreamEntitiesServer)(nil).SetTrailer), arg0)
}
+
+// MockRemoteAgentClient is a mock of RemoteAgentClient interface.
+type MockRemoteAgentClient struct {
+ ctrl *gomock.Controller
+ recorder *MockRemoteAgentClientMockRecorder
+}
+
+// MockRemoteAgentClientMockRecorder is the mock recorder for MockRemoteAgentClient.
+type MockRemoteAgentClientMockRecorder struct {
+ mock *MockRemoteAgentClient
+}
+
+// NewMockRemoteAgentClient creates a new mock instance.
+func NewMockRemoteAgentClient(ctrl *gomock.Controller) *MockRemoteAgentClient {
+ mock := &MockRemoteAgentClient{ctrl: ctrl}
+ mock.recorder = &MockRemoteAgentClientMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockRemoteAgentClient) EXPECT() *MockRemoteAgentClientMockRecorder {
+ return m.recorder
+}
+
+// GetFlareFiles mocks base method.
+func (m *MockRemoteAgentClient) GetFlareFiles(ctx context.Context, in *core.GetFlareFilesRequest, opts ...grpc.CallOption) (*core.GetFlareFilesResponse, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{ctx, in}
+ for _, a := range opts {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetFlareFiles", varargs...)
+ ret0, _ := ret[0].(*core.GetFlareFilesResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetFlareFiles indicates an expected call of GetFlareFiles.
+func (mr *MockRemoteAgentClientMockRecorder) GetFlareFiles(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{ctx, in}, opts...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFlareFiles", reflect.TypeOf((*MockRemoteAgentClient)(nil).GetFlareFiles), varargs...)
+}
+
+// GetStatusDetails mocks base method.
+func (m *MockRemoteAgentClient) GetStatusDetails(ctx context.Context, in *core.GetStatusDetailsRequest, opts ...grpc.CallOption) (*core.GetStatusDetailsResponse, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{ctx, in}
+ for _, a := range opts {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetStatusDetails", varargs...)
+ ret0, _ := ret[0].(*core.GetStatusDetailsResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetStatusDetails indicates an expected call of GetStatusDetails.
+func (mr *MockRemoteAgentClientMockRecorder) GetStatusDetails(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{ctx, in}, opts...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatusDetails", reflect.TypeOf((*MockRemoteAgentClient)(nil).GetStatusDetails), varargs...)
+}
+
+// MockRemoteAgentServer is a mock of RemoteAgentServer interface.
+type MockRemoteAgentServer struct {
+ ctrl *gomock.Controller
+ recorder *MockRemoteAgentServerMockRecorder
+}
+
+// MockRemoteAgentServerMockRecorder is the mock recorder for MockRemoteAgentServer.
+type MockRemoteAgentServerMockRecorder struct {
+ mock *MockRemoteAgentServer
+}
+
+// NewMockRemoteAgentServer creates a new mock instance.
+func NewMockRemoteAgentServer(ctrl *gomock.Controller) *MockRemoteAgentServer {
+ mock := &MockRemoteAgentServer{ctrl: ctrl}
+ mock.recorder = &MockRemoteAgentServerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockRemoteAgentServer) EXPECT() *MockRemoteAgentServerMockRecorder {
+ return m.recorder
+}
+
+// GetFlareFiles mocks base method.
+func (m *MockRemoteAgentServer) GetFlareFiles(arg0 context.Context, arg1 *core.GetFlareFilesRequest) (*core.GetFlareFilesResponse, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetFlareFiles", arg0, arg1)
+ ret0, _ := ret[0].(*core.GetFlareFilesResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetFlareFiles indicates an expected call of GetFlareFiles.
+func (mr *MockRemoteAgentServerMockRecorder) GetFlareFiles(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFlareFiles", reflect.TypeOf((*MockRemoteAgentServer)(nil).GetFlareFiles), arg0, arg1)
+}
+
+// GetStatusDetails mocks base method.
+func (m *MockRemoteAgentServer) GetStatusDetails(arg0 context.Context, arg1 *core.GetStatusDetailsRequest) (*core.GetStatusDetailsResponse, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetStatusDetails", arg0, arg1)
+ ret0, _ := ret[0].(*core.GetStatusDetailsResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetStatusDetails indicates an expected call of GetStatusDetails.
+func (mr *MockRemoteAgentServerMockRecorder) GetStatusDetails(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatusDetails", reflect.TypeOf((*MockRemoteAgentServer)(nil).GetStatusDetails), arg0, arg1)
+}
diff --git a/pkg/remoteconfig/state/products.go b/pkg/remoteconfig/state/products.go
index 120229ecf2d39..ddb429cf565bb 100644
--- a/pkg/remoteconfig/state/products.go
+++ b/pkg/remoteconfig/state/products.go
@@ -9,6 +9,7 @@ var validProducts = map[string]struct{}{
ProductUpdaterCatalogDD: {},
ProductUpdaterAgent: {},
ProductUpdaterTask: {},
+ ProductActionPlatformRunnerKeys: {},
ProductAgentConfig: {},
ProductAgentFailover: {},
ProductAgentTask: {},
@@ -40,6 +41,8 @@ const (
ProductUpdaterAgent = "UPDATER_AGENT"
// ProductUpdaterTask is the product used to receive tasks to execute
ProductUpdaterTask = "UPDATER_TASK"
+ // ProductActionPlatformRunnerKeys is to receive signing keys for the action platform "private action runner"
+ ProductActionPlatformRunnerKeys = "AP_RUNNER_KEYS"
// ProductAgentConfig is to receive agent configurations, like the log level
ProductAgentConfig = "AGENT_CONFIG"
// ProductAgentFailover is to receive the multi-region failover configuration
diff --git a/pkg/security/config/config.go b/pkg/security/config/config.go
index c28f98dc7f473..99d8b75d86aad 100644
--- a/pkg/security/config/config.go
+++ b/pkg/security/config/config.go
@@ -269,6 +269,9 @@ type RuntimeSecurityConfig struct {
// WindowsProbeChannelUnbuffered defines if the windows probe channel should be unbuffered
WindowsProbeBlockOnChannelSend bool
+ WindowsWriteEventRateLimiterMaxAllowed int
+ WindowsWriteEventRateLimiterPeriod time.Duration
+
// IMDSIPv4 is used to provide a custom IP address for the IMDS endpoint
IMDSIPv4 uint32
}
@@ -324,13 +327,16 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) {
}
rsConfig := &RuntimeSecurityConfig{
- RuntimeEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enabled"),
- FIMEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.fim_enabled"),
- WindowsFilenameCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.windows_filename_cache_max"),
- WindowsRegistryCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.windows_registry_cache_max"),
- ETWEventsChannelSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.etw_events_channel_size"),
- ETWEventsMaxBuffers: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.etw_events_max_buffers"),
- WindowsProbeBlockOnChannelSend: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.windows_probe_block_on_channel_send"),
+ RuntimeEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enabled"),
+ FIMEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.fim_enabled"),
+
+ // Windows specific
+ WindowsFilenameCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.windows_filename_cache_max"),
+ WindowsRegistryCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.windows_registry_cache_max"),
+ ETWEventsChannelSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.etw_events_channel_size"),
+ WindowsProbeBlockOnChannelSend: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.windows_probe_block_on_channel_send"),
+ WindowsWriteEventRateLimiterMaxAllowed: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.windows_write_event_rate_limiter_max_allowed"),
+ WindowsWriteEventRateLimiterPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.windows_write_event_rate_limiter_period"),
SocketPath: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.socket"),
EventServerBurst: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.event_server.burst"),
diff --git a/pkg/security/module/ecs_tags.go b/pkg/security/module/ecs_tags.go
index 6d9db97c81c7e..92564bd944acf 100644
--- a/pkg/security/module/ecs_tags.go
+++ b/pkg/security/module/ecs_tags.go
@@ -10,6 +10,7 @@ package module
import (
"context"
+ "strings"
"time"
ecsmeta "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata"
@@ -29,10 +30,24 @@ func getCurrentECSTaskTags() (map[string]string, error) {
return nil, err
}
+ cont, err := client.GetContainer(ctx)
+ if err != nil {
+ return nil, err
+ }
+ imageName := cont.Name
+ imageTag := ""
+ image := strings.Split(cont.Image, ":")
+ if len(image) == 2 {
+ imageName = image[0]
+ imageTag = image[1]
+ }
+
return map[string]string{
"task_name": task.Family,
"task_family": task.Family,
"task_arn": task.TaskARN,
"task_version": task.Version,
+ "image_name": imageName,
+ "image_tag": imageTag,
}, nil
}
diff --git a/pkg/security/probe/config/config.go b/pkg/security/probe/config/config.go
index 8a5942074a7f0..c312bbdea3181 100644
--- a/pkg/security/probe/config/config.go
+++ b/pkg/security/probe/config/config.go
@@ -82,9 +82,6 @@ type Config struct {
// DentryCacheSize is the size of the user space dentry cache
DentryCacheSize int
- // RemoteTaggerEnabled defines whether the remote tagger is enabled
- RemoteTaggerEnabled bool
-
// NOTE(safchain) need to revisit this one as it can impact multiple event consumers
// EnvsWithValue lists environnement variables that will be fully exported
EnvsWithValue []string
@@ -171,7 +168,6 @@ func NewConfig() (*Config, error) {
ERPCDentryResolutionEnabled: getBool("erpc_dentry_resolution_enabled"),
MapDentryResolutionEnabled: getBool("map_dentry_resolution_enabled"),
DentryCacheSize: getInt("dentry_cache_size"),
- RemoteTaggerEnabled: getBool("remote_tagger"),
RuntimeMonitor: getBool("runtime_monitor.enabled"),
NetworkLazyInterfacePrefixes: getStringSlice("network.lazy_interface_prefixes"),
NetworkClassifierPriority: uint16(getInt("network.classifier_priority")),
diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants.json
index bf2a945016104..00d66492a9624 100644
--- a/pkg/security/probe/constantfetch/btfhub/constants.json
+++ b/pkg/security/probe/constantfetch/btfhub/constants.json
@@ -11876,6 +11876,13 @@
"uname_release": "4.14.352-268.569.amzn2.aarch64",
"cindex": 3
},
+ {
+ "distrib": "amzn",
+ "version": "2",
+ "arch": "arm64",
+ "uname_release": "4.14.353-270.569.amzn2.aarch64",
+ "cindex": 3
+ },
{
"distrib": "amzn",
"version": "2",
@@ -12695,6 +12702,13 @@
"uname_release": "4.14.352-268.569.amzn2.x86_64",
"cindex": 8
},
+ {
+ "distrib": "amzn",
+ "version": "2",
+ "arch": "x86_64",
+ "uname_release": "4.14.353-270.569.amzn2.x86_64",
+ "cindex": 8
+ },
{
"distrib": "amzn",
"version": "2",
@@ -13416,6 +13430,20 @@
"uname_release": "4.14.352-190.568.amzn1.x86_64",
"cindex": 15
},
+ {
+ "distrib": "amzn",
+ "version": "2018",
+ "arch": "x86_64",
+ "uname_release": "4.14.352-190.569.amzn1.x86_64",
+ "cindex": 15
+ },
+ {
+ "distrib": "amzn",
+ "version": "2018",
+ "arch": "x86_64",
+ "uname_release": "4.14.353-190.569.amzn1.x86_64",
+ "cindex": 15
+ },
{
"distrib": "amzn",
"version": "2018",
@@ -20913,6 +20941,13 @@
"uname_release": "4.1.12-124.90.3.el7uek.x86_64",
"cindex": 94
},
+ {
+ "distrib": "ol",
+ "version": "7",
+ "arch": "x86_64",
+ "uname_release": "4.1.12-124.91.3.el7uek.x86_64",
+ "cindex": 94
+ },
{
"distrib": "ol",
"version": "7",
@@ -23825,6 +23860,20 @@
"uname_release": "4.14.35-2047.542.2.el7uek.x86_64",
"cindex": 96
},
+ {
+ "distrib": "ol",
+ "version": "7",
+ "arch": "x86_64",
+ "uname_release": "4.14.35-2047.543.1.el7uek.x86_64",
+ "cindex": 96
+ },
+ {
+ "distrib": "ol",
+ "version": "7",
+ "arch": "x86_64",
+ "uname_release": "4.14.35-2047.543.2.el7uek.x86_64",
+ "cindex": 96
+ },
{
"distrib": "ol",
"version": "7",
diff --git a/pkg/security/probe/constantfetch/runtime_compiled.go b/pkg/security/probe/constantfetch/runtime_compiled.go
index cb004bf72897b..c847ab821e9a4 100644
--- a/pkg/security/probe/constantfetch/runtime_compiled.go
+++ b/pkg/security/probe/constantfetch/runtime_compiled.go
@@ -10,7 +10,7 @@ package constantfetch
import (
"bytes"
- "debug/elf"
+ "errors"
"fmt"
"sort"
"text/template"
@@ -20,6 +20,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/ebpf"
"github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime"
"github.com/DataDog/datadog-agent/pkg/security/seclog"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
type rcSymbolPair struct {
@@ -121,7 +122,7 @@ func (cf *RuntimeCompilationConstantFetcher) FinishAndGetResults() (map[string]u
return nil, err
}
- f, err := elf.NewFile(elfFile)
+ f, err := safeelf.NewFile(elfFile)
if err != nil {
return nil, err
}
@@ -136,6 +137,9 @@ func (cf *RuntimeCompilationConstantFetcher) FinishAndGetResults() (map[string]u
}
section := f.Sections[sym.Section]
+ if section.ReaderAt == nil {
+ return nil, errors.New("section not available in random-access form")
+ }
buf := make([]byte, sym.Size)
if _, err := section.ReadAt(buf, int64(sym.Value)); err != nil {
return nil, fmt.Errorf("unable to read section at %d: %s", int64(sym.Value), err)
diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go
index fa8600bf245e9..cf28d72d46ccc 100644
--- a/pkg/security/probe/probe_ebpf.go
+++ b/pkg/security/probe/probe_ebpf.go
@@ -429,7 +429,7 @@ func (p *EBPFProbe) playSnapshot(notifyConsumers bool) {
var events []*model.Event
entryToEvent := func(entry *model.ProcessCacheEntry) {
- if entry.Source != model.ProcessCacheEntryFromSnapshot && !entry.IsExec {
+ if entry.Source != model.ProcessCacheEntryFromSnapshot {
return
}
entry.Retain()
diff --git a/pkg/security/probe/probe_kernel_file_windows.go b/pkg/security/probe/probe_kernel_file_windows.go
index ee34f239944e9..f0d3d99de673d 100644
--- a/pkg/security/probe/probe_kernel_file_windows.go
+++ b/pkg/security/probe/probe_kernel_file_windows.go
@@ -49,6 +49,7 @@ type fileObjectPointer uint64
var (
errDiscardedPath = errors.New("discarded path")
+ errReadNoPath = errors.New("read with no path")
)
/*
@@ -540,7 +541,10 @@ func (wp *WindowsProbe) parseReadArgs(e *etw.DDEventRecord) (*readArgs, error) {
if s, ok := wp.filePathResolver.Get(fileObjectPointer(ra.fileObject)); ok {
ra.fileName = s.fileName
ra.userFileName = s.userFileName
+ } else {
+ return nil, errReadNoPath
}
+
return ra, nil
}
diff --git a/pkg/security/probe/probe_kernel_file_windows_test.go b/pkg/security/probe/probe_kernel_file_windows_test.go
index 3adf17edf4cc7..c7994bd77dcb0 100644
--- a/pkg/security/probe/probe_kernel_file_windows_test.go
+++ b/pkg/security/probe/probe_kernel_file_windows_test.go
@@ -18,10 +18,12 @@ import (
"github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest"
- "github.com/DataDog/datadog-agent/pkg/security/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/DataDog/datadog-agent/pkg/security/config"
+ "github.com/DataDog/datadog-agent/pkg/security/secl/model"
+
"golang.org/x/sys/windows"
)
@@ -40,9 +42,10 @@ func createTestProbe() (*WindowsProbe, error) {
if err != nil {
return nil, err
}
- wp.isRenameEnabled = true
- wp.isDeleteEnabled = true
- wp.isWriteEnabled = true
+
+ wp.enabledEventTypes[model.FileRenameEventType.String()] = true
+ wp.enabledEventTypes[model.DeleteFileEventType.String()] = true
+ wp.enabledEventTypes[model.WriteFileEventType.String()] = true
err = wp.Init()
diff --git a/pkg/security/probe/probe_windows.go b/pkg/security/probe/probe_windows.go
index 9ef7e7e8d358c..8d4019daaf1ed 100644
--- a/pkg/security/probe/probe_windows.go
+++ b/pkg/security/probe/probe_windows.go
@@ -11,7 +11,6 @@ import (
"errors"
"fmt"
"path/filepath"
- "slices"
"sync"
"time"
@@ -79,6 +78,10 @@ type WindowsProbe struct {
// a well-known provider
auditSession etw.Session
+ // rate limiters
+ writeKey writeRateLimiterKey // use a single key for all write events to avoid memory allocations
+ writeRateLimiter *utils.Limiter[writeRateLimiterKey]
+
// path caches
filePathResolver *lru.Cache[fileObjectPointer, fileCache]
regPathResolver *lru.Cache[regObjectPointer, string]
@@ -102,19 +105,21 @@ type WindowsProbe struct {
// actions
processKiller *ProcessKiller
- // enabled probes
- isRenameEnabled bool
- isWriteEnabled bool
- isDeleteEnabled bool
- isChangePermissionEnabled bool
+ enabledEventTypesLock sync.RWMutex
+ enabledEventTypes map[string]bool
+
// channel handling. Currently configurable, but should probably be set
// to false with a configurable size value
blockonchannelsend bool
// approvers
- currentEventTypes []string
- approvers map[eval.Field][]approver
- approverLock sync.RWMutex
+ approvers map[eval.Field][]approver
+ approverLock sync.RWMutex
+}
+
+type writeRateLimiterKey struct {
+ fileObject fileObjectPointer
+ processID uint32
}
type approver interface {
@@ -309,16 +314,17 @@ func (p *WindowsProbe) reconfigureProvider() error {
idClose,
}
- if p.isWriteEnabled {
+ // reconfigureProvider should be called with the enabledEventTypesLock held for reading
+ if p.enabledEventTypes[model.WriteFileEventType.String()] {
fileIDs = append(fileIDs, idWrite)
}
- if p.isRenameEnabled {
+ if p.enabledEventTypes[model.FileRenameEventType.String()] {
fileIDs = append(fileIDs, idRename, idRenamePath, idRename29)
}
- if p.isDeleteEnabled {
+ if p.enabledEventTypes[model.DeleteFileEventType.String()] {
fileIDs = append(fileIDs, idSetDelete, idDeletePath)
}
- if p.isChangePermissionEnabled {
+ if p.enabledEventTypes[model.ChangePermissionEventType.String()] {
fileIDs = append(fileIDs, idObjectPermsChange)
}
@@ -352,6 +358,23 @@ func (p *WindowsProbe) reconfigureProvider() error {
// try masking on create & create_new_file
// given the current requirements, I think we can _probably_ just do create_new_file
cfg.MatchAnyKeyword = 0xF7E3
+
+ regIDs := []uint16{}
+ // reconfigureProvider should be called with the enabledEventTypesLock held for reading
+ if p.enabledEventTypes[model.CreateRegistryKeyEventType.String()] {
+ regIDs = append(regIDs, idRegCreateKey)
+ }
+ if p.enabledEventTypes[model.OpenRegistryKeyEventType.String()] {
+ regIDs = append(regIDs, idRegOpenKey)
+ }
+ if p.enabledEventTypes[model.DeleteRegistryKeyEventType.String()] {
+ regIDs = append(regIDs, idRegDeleteKey)
+ }
+ if p.enabledEventTypes[model.SetRegistryKeyValueEventType.String()] {
+ regIDs = append(regIDs, idRegSetValueKey)
+ }
+
+ cfg.EnabledIDs = regIDs
})
if p.auditSession != nil {
@@ -382,11 +405,15 @@ func (p *WindowsProbe) Setup() error {
func (p *WindowsProbe) Stop() {
if p.fimSession != nil || p.auditSession != nil {
if p.fimSession != nil {
- _ = p.fimSession.StopTracing()
+ if err := p.fimSession.StopTracing(); err != nil {
+ log.Errorf("Error stopping tracing %v", err)
+ }
}
if p.auditSession != nil {
log.Info("Calling stoptracing on audit session")
- _ = p.auditSession.StopTracing()
+ if err := p.auditSession.StopTracing(); err != nil {
+ log.Errorf("Error stopping tracing audit %v", err)
+ }
}
p.fimwg.Wait()
}
@@ -419,8 +446,10 @@ func (p *WindowsProbe) approve(field eval.Field, eventType string, value string)
approvers, exists := p.approvers[field]
if !exists {
+ p.enabledEventTypesLock.RLock()
+ defer p.enabledEventTypesLock.RUnlock()
// no approvers, so no filtering for this field, except if no rule for this event type
- return slices.Contains(p.currentEventTypes, eventType)
+ return p.enabledEventTypes[eventType]
}
for _, approver := range approvers {
@@ -440,11 +469,9 @@ func (p *WindowsProbe) auditEtw(ecb etwCallback) error {
case etw.DDGUID(p.auditguid):
switch e.EventHeader.EventDescriptor.ID {
case idObjectPermsChange:
- if p.isChangePermissionEnabled {
- if pc, err := p.parseObjectPermsChange(e); err == nil {
- log.Infof("Received objectPermsChange event %d %s\n", e.EventHeader.EventDescriptor.ID, pc)
- ecb(pc, e.EventHeader.ProcessID)
- }
+ if pc, err := p.parseObjectPermsChange(e); err == nil {
+ log.Tracef("Received objectPermsChange event %d %s\n", e.EventHeader.EventDescriptor.ID, pc)
+ ecb(pc, e.EventHeader.ProcessID)
}
}
}
@@ -532,7 +559,6 @@ func (p *WindowsProbe) setupEtw(ecb etwCallback) error {
}
case idFlush:
if fa, err := p.parseFlushArgs(e); err == nil {
-
p.stats.fpnLock.Lock()
p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
p.stats.fpnLock.Unlock()
@@ -541,89 +567,89 @@ func (p *WindowsProbe) setupEtw(ecb etwCallback) error {
}
case idWrite:
- if p.isWriteEnabled {
- if wa, err := p.parseWriteArgs(e); err == nil {
- //fmt.Printf("Received Write event %d %s\n", e.EventHeader.EventDescriptor.ID, wa)
- log.Tracef("Received Write event %d %s\n", e.EventHeader.EventDescriptor.ID, wa)
- ecb(wa, e.EventHeader.ProcessID)
- p.stats.fpnLock.Lock()
- p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
- p.stats.fpnLock.Unlock()
- }
+ if wa, err := p.parseWriteArgs(e); err == nil {
+ //fmt.Printf("Received Write event %d %s\n", e.EventHeader.EventDescriptor.ID, wa)
+ log.Tracef("Received Write event %d %s\n", e.EventHeader.EventDescriptor.ID, wa)
+
+ p.stats.fpnLock.Lock()
+ p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
+ p.stats.fpnLock.Unlock()
+
+ ecb(wa, e.EventHeader.ProcessID)
}
case idSetInformation:
if si, err := p.parseInformationArgs(e); err == nil {
log.Tracef("Received SetInformation event %d %s\n", e.EventHeader.EventDescriptor.ID, si)
- ecb(si, e.EventHeader.ProcessID)
+
p.stats.fpnLock.Lock()
p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
p.stats.fpnLock.Unlock()
+
+ ecb(si, e.EventHeader.ProcessID)
}
case idSetDelete:
- if p.isDeleteEnabled {
- if sd, err := p.parseSetDeleteArgs(e); err == nil {
- log.Tracef("Received SetDelete event %d %s\n", e.EventHeader.EventDescriptor.ID, sd)
- ecb(sd, e.EventHeader.ProcessID)
-
- p.stats.fpnLock.Lock()
- p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
- p.stats.fpnLock.Unlock()
- }
+ if sd, err := p.parseSetDeleteArgs(e); err == nil {
+ log.Tracef("Received SetDelete event %d %s\n", e.EventHeader.EventDescriptor.ID, sd)
+
+ p.stats.fpnLock.Lock()
+ p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
+ p.stats.fpnLock.Unlock()
+
+ ecb(sd, e.EventHeader.ProcessID)
}
case idDeletePath:
- if p.isDeleteEnabled {
- if dp, err := p.parseDeletePathArgs(e); err == nil {
- log.Tracef("Received DeletePath event %d %s\n", e.EventHeader.EventDescriptor.ID, dp)
+ if dp, err := p.parseDeletePathArgs(e); err == nil {
+ log.Tracef("Received DeletePath event %d %s\n", e.EventHeader.EventDescriptor.ID, dp)
- p.stats.fpnLock.Lock()
- p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
- p.stats.fpnLock.Unlock()
+ p.stats.fpnLock.Lock()
+ p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
+ p.stats.fpnLock.Unlock()
- ecb(dp, e.EventHeader.ProcessID)
- }
+ ecb(dp, e.EventHeader.ProcessID)
}
case idRename:
- if p.isRenameEnabled {
- if rn, err := p.parseRenameArgs(e); err == nil {
- log.Tracef("Received Rename event %d %s\n", e.EventHeader.EventDescriptor.ID, rn)
- ecb(rn, e.EventHeader.ProcessID)
-
- p.stats.fpnLock.Lock()
- p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
- p.stats.fpnLock.Unlock()
- }
+ if rn, err := p.parseRenameArgs(e); err == nil {
+ log.Tracef("Received Rename event %d %s\n", e.EventHeader.EventDescriptor.ID, rn)
+
+ p.stats.fpnLock.Lock()
+ p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
+ p.stats.fpnLock.Unlock()
+
+ ecb(rn, e.EventHeader.ProcessID)
}
case idRenamePath:
- if p.isRenameEnabled {
- if rn, err := p.parseRenamePathArgs(e); err == nil {
- log.Tracef("Received RenamePath event %d %s\n", e.EventHeader.EventDescriptor.ID, rn)
- ecb(rn, e.EventHeader.ProcessID)
- p.stats.fpnLock.Lock()
- p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
- p.stats.fpnLock.Unlock()
- }
+ if rn, err := p.parseRenamePathArgs(e); err == nil {
+ log.Tracef("Received RenamePath event %d %s\n", e.EventHeader.EventDescriptor.ID, rn)
+
+ p.stats.fpnLock.Lock()
+ p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
+ p.stats.fpnLock.Unlock()
+
+ ecb(rn, e.EventHeader.ProcessID)
}
case idFSCTL:
if fs, err := p.parseFsctlArgs(e); err == nil {
log.Tracef("Received FSCTL event %d %s\n", e.EventHeader.EventDescriptor.ID, fs)
- ecb(fs, e.EventHeader.ProcessID)
+
p.stats.fpnLock.Lock()
p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
p.stats.fpnLock.Unlock()
+
+ ecb(fs, e.EventHeader.ProcessID)
}
case idRename29:
- if p.isRenameEnabled {
- if rn, err := p.parseRename29Args(e); err == nil {
- log.Tracef("Received Rename29 event %d %s\n", e.EventHeader.EventDescriptor.ID, rn)
- ecb(rn, e.EventHeader.ProcessID)
- }
+ if rn, err := p.parseRename29Args(e); err == nil {
+ log.Tracef("Received Rename29 event %d %s\n", e.EventHeader.EventDescriptor.ID, rn)
+
p.stats.fpnLock.Lock()
p.stats.fileProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
p.stats.fpnLock.Unlock()
+
+ ecb(rn, e.EventHeader.ProcessID)
}
}
@@ -635,32 +661,38 @@ func (p *WindowsProbe) setupEtw(ecb etwCallback) error {
case idRegCreateKey:
if cka, err := p.parseCreateRegistryKey(e); err == nil {
log.Tracef("Got idRegCreateKey %s", cka)
- ecb(cka, e.EventHeader.ProcessID)
+
p.stats.rpnLock.Lock()
p.stats.regProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
p.stats.rpnLock.Unlock()
+ ecb(cka, e.EventHeader.ProcessID)
+
}
case idRegOpenKey:
if cka, err := p.parseOpenRegistryKey(e); err == nil {
log.Tracef("Got idRegOpenKey %s", cka)
- ecb(cka, e.EventHeader.ProcessID)
+
p.stats.rpnLock.Lock()
p.stats.regProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
p.stats.rpnLock.Unlock()
+
+ ecb(cka, e.EventHeader.ProcessID)
}
case idRegDeleteKey:
if dka, err := p.parseDeleteRegistryKey(e); err == nil {
log.Tracef("Got idRegDeleteKey %v", dka)
- ecb(dka, e.EventHeader.ProcessID)
+
p.stats.rpnLock.Lock()
p.stats.regProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
p.stats.rpnLock.Unlock()
+ ecb(dka, e.EventHeader.ProcessID)
}
case idRegFlushKey:
if dka, err := p.parseFlushKey(e); err == nil {
log.Tracef("Got idRegFlushKey %v", dka)
+
p.stats.rpnLock.Lock()
p.stats.regProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
p.stats.rpnLock.Unlock()
@@ -668,6 +700,7 @@ func (p *WindowsProbe) setupEtw(ecb etwCallback) error {
case idRegCloseKey:
if dka, err := p.parseCloseKeyArgs(e); err == nil {
log.Tracef("Got idRegCloseKey %s", dka)
+
p.regPathResolver.Remove(dka.keyObject)
p.stats.rpnLock.Lock()
p.stats.regProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
@@ -677,6 +710,7 @@ func (p *WindowsProbe) setupEtw(ecb etwCallback) error {
case idQuerySecurityKey:
if dka, err := p.parseQuerySecurityKeyArgs(e); err == nil {
log.Tracef("Got idQuerySecurityKey %v", dka.keyName)
+
p.stats.rpnLock.Lock()
p.stats.regProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
p.stats.rpnLock.Unlock()
@@ -684,6 +718,7 @@ func (p *WindowsProbe) setupEtw(ecb etwCallback) error {
case idSetSecurityKey:
if dka, err := p.parseSetSecurityKeyArgs(e); err == nil {
log.Tracef("Got idSetSecurityKey %v", dka.keyName)
+
p.stats.rpnLock.Lock()
p.stats.regProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
p.stats.rpnLock.Unlock()
@@ -691,6 +726,7 @@ func (p *WindowsProbe) setupEtw(ecb etwCallback) error {
case idRegSetValueKey:
if svk, err := p.parseSetValueKey(e); err == nil {
log.Tracef("Got idRegSetValueKey %s", svk)
+
ecb(svk, e.EventHeader.ProcessID)
p.stats.rpnLock.Lock()
p.stats.regProcessedNotifications[e.EventHeader.EventDescriptor.ID]++
@@ -703,6 +739,34 @@ func (p *WindowsProbe) setupEtw(ecb etwCallback) error {
}
+func (p *WindowsProbe) preChanETWHandle(arg interface{}) bool {
+ switch arg := arg.(type) {
+ case *closeArgs, *cleanupArgs, *createHandleArgs:
+ return false
+ case *renameArgs:
+ fc := fileCache{
+ fileName: arg.fileName,
+ userFileName: arg.userFileName,
+ }
+ p.renamePreArgs.Add(uint64(arg.fileObject), fc)
+ return false
+ case *rename29Args:
+ fc := fileCache{
+ fileName: arg.fileName,
+ userFileName: arg.userFileName,
+ }
+ p.renamePreArgs.Add(uint64(arg.fileObject), fc)
+ return false
+ case *writeArgs:
+ // rate limit bursts of write events
+ p.writeKey.fileObject = arg.fileObject
+ p.writeKey.processID = arg.DDEventHeader.ProcessID
+ return p.writeRateLimiter.Allow(p.writeKey)
+ default:
+ return true
+ }
+}
+
// Start processing events
func (p *WindowsProbe) Start() error {
@@ -715,6 +779,10 @@ func (p *WindowsProbe) Start() error {
go func() {
defer p.fimwg.Done()
err := p.setupEtw(func(n interface{}, pid uint32) {
+ if !p.preChanETWHandle(n) {
+ return
+ }
+
if p.blockonchannelsend {
p.onETWNotification <- etwNotification{n, pid}
} else {
@@ -725,7 +793,7 @@ func (p *WindowsProbe) Start() error {
}
}
})
- log.Infof("Done StartTracing %v", err)
+ log.Infof("Done StartTracing %v, lost events: %d", err, p.stats.etwChannelBlocked)
}()
}
if p.auditSession != nil {
@@ -768,7 +836,9 @@ func (p *WindowsProbe) Start() error {
continue
}
case notif := <-p.onETWNotification:
- p.handleETWNotification(ev, notif)
+ if ok := p.handleETWNotification(ev, notif); !ok {
+ continue
+ }
}
p.DispatchEvent(ev)
@@ -849,7 +919,7 @@ func (p *WindowsProbe) handleProcessStop(ev *model.Event, stop *procmon.ProcessS
return true
}
-func (p *WindowsProbe) handleETWNotification(ev *model.Event, notif etwNotification) {
+func (p *WindowsProbe) handleETWNotification(ev *model.Event, notif etwNotification) bool {
// handle incoming events here
// each event will come in as a different type
// parse it with
@@ -864,23 +934,11 @@ func (p *WindowsProbe) handleETWNotification(ev *model.Event, notif etwNotificat
BasenameStr: filepath.Base(arg.fileName),
},
}
- case *renameArgs:
- fc := fileCache{
- fileName: arg.fileName,
- userFileName: arg.userFileName,
- }
- p.renamePreArgs.Add(uint64(arg.fileObject), fc)
- case *rename29Args:
- fc := fileCache{
- fileName: arg.fileName,
- userFileName: arg.userFileName,
- }
- p.renamePreArgs.Add(uint64(arg.fileObject), fc)
case *renamePath:
fileCache, found := p.renamePreArgs.Get(uint64(arg.fileObject))
if !found {
log.Debugf("unable to find renamePreArgs for %d", uint64(arg.fileObject))
- return
+ return false
}
ev.Type = uint32(model.FileRenameEventType)
ev.RenameFile = model.RenameFileEvent{
@@ -964,12 +1022,16 @@ func (p *WindowsProbe) handleETWNotification(ev *model.Event, notif etwNotificat
}
}
- if ev.Type != uint32(model.UnknownEventType) {
- errRes := p.setProcessContext(notif.pid, ev)
- if errRes != nil {
- log.Debugf("%v", errRes)
- }
+ if ev.Type == uint32(model.UnknownEventType) {
+ log.Debugf("unknown event type: %T", notif.arg)
+ return false
}
+
+ errRes := p.setProcessContext(notif.pid, ev)
+ if errRes != nil {
+ log.Debugf("%v", errRes)
+ }
+ return true
}
func (p *WindowsProbe) setProcessContext(pid uint32, event *model.Event) error {
@@ -1160,6 +1222,12 @@ func initializeWindowsProbe(config *config.Config, opts Opts) (*WindowsProbe, er
return nil, err
}
+ // only allow 1 write event per second per file per process
+ writeRateLimiter, err := utils.NewLimiter[writeRateLimiterKey](config.RuntimeSecurity.WindowsWriteEventRateLimiterMaxAllowed, 1, config.RuntimeSecurity.WindowsWriteEventRateLimiterPeriod)
+ if err != nil {
+ return nil, err
+ }
+
rnc, err := lru.New[uint64, fileCache](5)
if err != nil {
return nil, err
@@ -1199,10 +1267,14 @@ func initializeWindowsProbe(config *config.Config, opts Opts) (*WindowsProbe, er
discardedFileHandles: dfh,
+ enabledEventTypes: make(map[string]bool),
+
approvers: make(map[eval.Field][]approver),
volumeMap: make(map[string]string),
+ writeRateLimiter: writeRateLimiter,
+
processKiller: processKiller,
blockonchannelsend: bocs,
@@ -1250,24 +1322,12 @@ func NewWindowsProbe(probe *Probe, config *config.Config, opts Opts, telemetry t
// ApplyRuleSet setup the probes for the provided set of rules and returns the policy report.
func (p *WindowsProbe) ApplyRuleSet(rs *rules.RuleSet) (*kfilters.ApplyRuleSetReport, error) {
- p.isWriteEnabled = false
- p.isRenameEnabled = false
- p.isDeleteEnabled = false
- p.isChangePermissionEnabled = false
- p.currentEventTypes = rs.GetEventTypes()
-
- for _, eventType := range p.currentEventTypes {
- switch eventType {
- case model.FileRenameEventType.String():
- p.isRenameEnabled = true
- case model.WriteFileEventType.String():
- p.isWriteEnabled = true
- case model.DeleteFileEventType.String():
- p.isDeleteEnabled = true
- case model.ChangePermissionEventType.String():
- p.isChangePermissionEnabled = true
- }
+ p.enabledEventTypesLock.Lock()
+ clear(p.enabledEventTypes)
+ for _, eventType := range rs.GetEventTypes() {
+ p.enabledEventTypes[eventType] = true
}
+ p.enabledEventTypesLock.Unlock()
ars, err := kfilters.NewApplyRuleSetReport(p.config.Probe, rs)
if err != nil {
@@ -1277,7 +1337,6 @@ func (p *WindowsProbe) ApplyRuleSet(rs *rules.RuleSet) (*kfilters.ApplyRuleSetRe
// remove old approvers
p.approverLock.Lock()
defer p.approverLock.Unlock()
-
clear(p.approvers)
for eventType, report := range ars.Policies {
@@ -1286,6 +1345,8 @@ func (p *WindowsProbe) ApplyRuleSet(rs *rules.RuleSet) (*kfilters.ApplyRuleSetRe
}
}
+ p.enabledEventTypesLock.RLock()
+ defer p.enabledEventTypesLock.RUnlock()
if err := p.reconfigureProvider(); err != nil {
return nil, err
}
diff --git a/pkg/security/ptracer/utils.go b/pkg/security/ptracer/utils.go
index f313d02415800..7edfc091c612b 100644
--- a/pkg/security/ptracer/utils.go
+++ b/pkg/security/ptracer/utils.go
@@ -11,7 +11,6 @@ package ptracer
import (
"bufio"
"bytes"
- "debug/elf"
"errors"
"fmt"
"io"
@@ -31,6 +30,7 @@ import (
"github.com/DataDog/datadog-agent/pkg/security/proto/ebpfless"
"github.com/DataDog/datadog-agent/pkg/security/secl/containerutils"
"github.com/DataDog/datadog-agent/pkg/security/secl/model"
+ "github.com/DataDog/datadog-agent/pkg/util/safeelf"
)
// Funcs mainly copied from github.com/DataDog/datadog-agent/pkg/security/utils/cgroup.go
@@ -459,7 +459,7 @@ func microsecsToNanosecs(secs uint64) uint64 {
}
func getModuleName(reader io.ReaderAt) (string, error) {
- elf, err := elf.NewFile(reader)
+ elf, err := safeelf.NewFile(reader)
if err != nil {
return "", err
}
diff --git a/pkg/security/resolvers/process/resolver_ebpf.go b/pkg/security/resolvers/process/resolver_ebpf.go
index c055c76b18919..ef06b13cbefd8 100644
--- a/pkg/security/resolvers/process/resolver_ebpf.go
+++ b/pkg/security/resolvers/process/resolver_ebpf.go
@@ -1268,6 +1268,10 @@ func (p *EBPFResolver) newEntryFromProcfsAndSyncKernelMaps(proc *process.Process
} else { // exec
entry.SetExecParent(parent)
}
+ } else if pid == 1 {
+ entry.SetAsExec()
+ } else {
+ seclog.Debugf("unable to set the type of process, not pid 1, no parent in cache: %+v", entry)
}
p.insertEntry(entry, p.entryCache[pid], source)
diff --git a/pkg/security/resolvers/resolvers_ebpf.go b/pkg/security/resolvers/resolvers_ebpf.go
index 83697d9a78b8a..0ea241aac7195 100644
--- a/pkg/security/resolvers/resolvers_ebpf.go
+++ b/pkg/security/resolvers/resolvers_ebpf.go
@@ -95,7 +95,7 @@ func NewEBPFResolvers(config *config.Config, manager *manager.Manager, statsdCli
if opts.TagsResolver != nil {
tagsResolver = opts.TagsResolver
} else {
- tagsResolver = tags.NewResolver(config.Probe, telemetry)
+ tagsResolver = tags.NewResolver(telemetry)
}
cgroupsResolver, err := cgroup.NewResolver(tagsResolver)
diff --git a/pkg/security/resolvers/resolvers_ebpfless.go b/pkg/security/resolvers/resolvers_ebpfless.go
index f4efbf35e7f10..93a8d97f0ead7 100644
--- a/pkg/security/resolvers/resolvers_ebpfless.go
+++ b/pkg/security/resolvers/resolvers_ebpfless.go
@@ -37,7 +37,7 @@ func NewEBPFLessResolvers(config *config.Config, statsdClient statsd.ClientInter
if opts.TagsResolver != nil {
tagsResolver = opts.TagsResolver
} else {
- tagsResolver = tags.NewResolver(config.Probe, telemetry)
+ tagsResolver = tags.NewResolver(telemetry)
}
processOpts := process.NewResolverOpts()
diff --git a/pkg/security/resolvers/resolvers_windows.go b/pkg/security/resolvers/resolvers_windows.go
index 09370a5f66ed6..762e2abed0d9e 100644
--- a/pkg/security/resolvers/resolvers_windows.go
+++ b/pkg/security/resolvers/resolvers_windows.go
@@ -35,7 +35,7 @@ func NewResolvers(config *config.Config, statsdClient statsd.ClientInterface, sc
return nil, err
}
- tagsResolver := tags.NewResolver(config.Probe, telemetry)
+ tagsResolver := tags.NewResolver(telemetry)
userSessionsResolver, err := usersessions.NewResolver(config.RuntimeSecurity)
if err != nil {
diff --git a/pkg/security/resolvers/tags/resolver.go b/pkg/security/resolvers/tags/resolver.go
index c8674a0bfa3f0..58255c69396e8 100644
--- a/pkg/security/resolvers/tags/resolver.go
+++ b/pkg/security/resolvers/tags/resolver.go
@@ -8,14 +8,16 @@ package tags
import (
"context"
+ "fmt"
"strings"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/remote"
- taggerTelemetry "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry"
+ coreconfig "github.com/DataDog/datadog-agent/comp/core/config"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ remoteTagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-remote"
"github.com/DataDog/datadog-agent/comp/core/tagger/types"
"github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/DataDog/datadog-agent/pkg/api/security"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
- "github.com/DataDog/datadog-agent/pkg/security/probe/config"
"github.com/DataDog/datadog-agent/pkg/security/utils"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
@@ -27,20 +29,6 @@ type Tagger interface {
Tag(entity types.EntityID, cardinality types.TagCardinality) ([]string, error)
}
-type nullTagger struct{}
-
-func (n *nullTagger) Start(_ context.Context) error {
- return nil
-}
-
-func (n *nullTagger) Stop() error {
- return nil
-}
-
-func (n *nullTagger) Tag(_ types.EntityID, _ types.TagCardinality) ([]string, error) {
- return nil, nil
-}
-
// Resolver represents a cache resolver
type Resolver interface {
Start(ctx context.Context) error
@@ -101,18 +89,24 @@ func (t *DefaultResolver) Stop() error {
}
// NewResolver returns a new tags resolver
-func NewResolver(config *config.Config, telemetry telemetry.Component) Resolver {
- if config.RemoteTaggerEnabled {
- options, err := remote.NodeAgentOptionsForSecurityResolvers(pkgconfigsetup.Datadog())
- if err != nil {
- log.Errorf("unable to configure the remote tagger: %s", err)
- } else {
- return &DefaultResolver{
- tagger: remote.NewTagger(options, pkgconfigsetup.Datadog(), taggerTelemetry.NewStore(telemetry), types.NewMatchAllFilter()),
+func NewResolver(telemetry telemetry.Component) Resolver {
+ ddConfig := pkgconfigsetup.Datadog()
+
+ params := tagger.RemoteParams{
+ RemoteFilter: types.NewMatchAllFilter(),
+ RemoteTarget: func(c coreconfig.Component) (string, error) { return fmt.Sprintf(":%v", c.GetInt("cmd_port")), nil },
+ RemoteTokenFetcher: func(c coreconfig.Component) func() (string, error) {
+ return func() (string, error) {
+ return security.FetchAuthToken(c)
}
- }
+ },
}
+
+ tagger, _ := remoteTagger.NewRemoteTagger(params, ddConfig, log.NewWrapper(2), telemetry)
+
return &DefaultResolver{
- tagger: &nullTagger{},
+ // TODO: (components) use the actual remote tagger instance from the Fx entry point
+ tagger: tagger,
}
+
}
diff --git a/pkg/security/secl/compiler/eval/context.go b/pkg/security/secl/compiler/eval/context.go
index f8775862623be..dc8804a459c50 100644
--- a/pkg/security/secl/compiler/eval/context.go
+++ b/pkg/security/secl/compiler/eval/context.go
@@ -33,6 +33,8 @@ type Context struct {
Registers map[RegisterID]int
now time.Time
+
+ CachedAncestorsCount int
}
// Now return and cache the `now` timestamp
@@ -58,6 +60,7 @@ func (c *Context) Reset() {
clear(c.BoolCache)
clear(c.Registers)
clear(c.RegisterCache)
+ c.CachedAncestorsCount = 0
}
// NewContext return a new Context
diff --git a/pkg/security/secl/compiler/generators/accessors/accessors.tmpl b/pkg/security/secl/compiler/generators/accessors/accessors.tmpl
index c9c256021d3b5..7f5dcfcf231c0 100644
--- a/pkg/security/secl/compiler/generators/accessors/accessors.tmpl
+++ b/pkg/security/secl/compiler/generators/accessors/accessors.tmpl
@@ -131,6 +131,37 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
return results
}
+ {{if and $Field.Iterator.IsOrigTypePtr (not $Field.GetArrayPrefix) $Field.Handler }}
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) {{$Field.ReturnType}} {
+ {{range $Check := $Checks}}
+ {{if $Field.Iterator.Name | HasPrefix $Check}}
+ {{$SubName := $Field.Iterator.Name | TrimPrefix $Check}}
+ {{$Check = $SubName | printf "pce%s"}}
+ if !{{$Check}}() {
+ return {{$Field.GetDefaultScalarReturnValue}}
+ }
+ {{end}}
+ {{end}}
+
+ {{$SubName := $Field.Iterator.Name | TrimPrefix $Field.Name}}
+
+ {{$Return := $SubName | printf "pce%s"}}
+ {{$SubName = $Field.Iterator.Name | TrimPrefix $Field.Prefix}}
+ {{$Handler := $Field.Iterator.Name | TrimPrefix $Field.Handler}}
+ {{$Return = print "ev.FieldHandlers." $Handler "(ev, &pce" $SubName ")"}}
+
+ {{if eq $Field.ReturnType "int"}}
+ {{if $Field.IsLength}}
+ return len({{".length" | TrimSuffix $Return}})
+ {{else}}
+ return int({{$Return}})
+ {{end}}
+ {{else}}
+ return {{$Return}}
+ {{end}}
+ })
+
+ {{else}}
value := iterator.Front(ctx)
for value != nil {
{{if $Field.Iterator.IsOrigTypePtr}}
@@ -178,6 +209,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
value = iterator.Next()
}
+ {{end}}
ctx.{{$Field.GetCacheName}}[field] = results
diff --git a/pkg/security/secl/model/accessors_unix.go b/pkg/security/secl/model/accessors_unix.go
index 1d7cd758a4dad..417bd66d33417 100644
--- a/pkg/security/secl/model/accessors_unix.go
+++ b/pkg/security/secl/model/accessors_unix.go
@@ -4670,13 +4670,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveProcessArgs(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -4763,13 +4759,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &pce.ProcessContext.Process)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -4825,13 +4817,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveProcessArgv0(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -5006,13 +4994,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveCGroupID(ev, &pce.ProcessContext.Process.CGroup)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -5037,13 +5021,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveCGroupManager(ev, &pce.ProcessContext.Process.CGroup)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -5098,13 +5078,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveProcessContainerID(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -5129,13 +5105,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &pce.ProcessContext.Process))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -5282,13 +5254,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &pce.ProcessContext.Process)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -5414,18 +5382,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFilesystem(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -5491,18 +5453,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &pce.ProcessContext.Process.FileEvent.FileFields)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -5569,18 +5525,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, false)
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return false
}
- result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &pce.ProcessContext.Process.FileEvent.FileFields)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -5761,18 +5711,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -5798,13 +5742,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -5832,18 +5772,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageName(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -5871,18 +5805,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -5910,18 +5838,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageVersion(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -5950,18 +5872,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -5987,13 +5903,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -6021,18 +5933,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, 0)
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return 0
}
- result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields))
- results = append(results, result)
- value = iterator.Next()
- }
+ return int(ev.FieldHandlers.ResolveRights(ev, &pce.ProcessContext.Process.FileEvent.FileFields))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -6098,18 +6004,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsUser(ev, &pce.ProcessContext.Process.FileEvent.FileFields)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -6355,18 +6255,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFilesystem(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -6432,18 +6326,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -6510,18 +6398,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, false)
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return false
}
- result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -6702,18 +6584,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -6739,13 +6615,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -6773,18 +6645,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageName(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -6812,18 +6678,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -6851,18 +6711,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageVersion(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -6891,18 +6745,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -6928,13 +6776,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -6962,18 +6806,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, 0)
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return 0
}
- result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields))
- results = append(results, result)
- value = iterator.Next()
- }
+ return int(ev.FieldHandlers.ResolveRights(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -7039,18 +6877,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsUser(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -7135,13 +6967,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessIsThread(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ return ev.FieldHandlers.ResolveProcessIsThread(ev, &pce.ProcessContext.Process)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -7386,13 +7214,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveK8SUID(ev, &pce.ProcessContext.Process.UserSession)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -7417,13 +7241,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveK8SUsername(ev, &pce.ProcessContext.Process.UserSession)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -9387,13 +9207,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveProcessArgs(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -9480,13 +9296,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &pce.ProcessContext.Process)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -9542,13 +9354,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveProcessArgv0(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -9723,13 +9531,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveCGroupID(ev, &pce.ProcessContext.Process.CGroup)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -9754,13 +9558,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveCGroupManager(ev, &pce.ProcessContext.Process.CGroup)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -9815,13 +9615,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveProcessContainerID(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -9846,13 +9642,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &pce.ProcessContext.Process))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -9999,13 +9791,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &pce.ProcessContext.Process)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -10131,18 +9919,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFilesystem(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -10208,18 +9990,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &pce.ProcessContext.Process.FileEvent.FileFields)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -10286,18 +10062,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, false)
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return false
}
- result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &pce.ProcessContext.Process.FileEvent.FileFields)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -10478,18 +10248,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -10515,13 +10279,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -10549,18 +10309,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageName(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -10588,18 +10342,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -10627,18 +10375,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageVersion(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -10667,18 +10409,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -10704,13 +10440,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -10738,18 +10470,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, 0)
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return 0
}
- result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields))
- results = append(results, result)
- value = iterator.Next()
- }
+ return int(ev.FieldHandlers.ResolveRights(ev, &pce.ProcessContext.Process.FileEvent.FileFields))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -10815,18 +10541,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsUser(ev, &pce.ProcessContext.Process.FileEvent.FileFields)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -11072,18 +10792,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFilesystem(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -11149,18 +10863,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -11227,18 +10935,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, false)
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return false
}
- result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -11419,18 +11121,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -11456,13 +11152,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -11490,18 +11182,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageName(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -11529,18 +11215,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -11568,18 +11248,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageVersion(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -11608,18 +11282,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -11645,13 +11313,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -11679,18 +11343,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, 0)
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return 0
}
- result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields))
- results = append(results, result)
- value = iterator.Next()
- }
+ return int(ev.FieldHandlers.ResolveRights(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -11756,18 +11414,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsUser(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -11852,13 +11504,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessIsThread(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ return ev.FieldHandlers.ResolveProcessIsThread(ev, &pce.ProcessContext.Process)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -12103,13 +11751,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveK8SUID(ev, &pce.ProcessContext.Process.UserSession)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -12134,13 +11778,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveK8SUsername(ev, &pce.ProcessContext.Process.UserSession)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -15258,13 +14898,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveProcessArgs(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -15351,13 +14987,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &pce.ProcessContext.Process)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -15413,13 +15045,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveProcessArgv0(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -15594,13 +15222,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveCGroupID(ev, &pce.ProcessContext.Process.CGroup)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -15625,13 +15249,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveCGroupManager(ev, &pce.ProcessContext.Process.CGroup)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -15686,13 +15306,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveProcessContainerID(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -15717,13 +15333,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &pce.ProcessContext.Process))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -15870,13 +15482,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &pce.ProcessContext.Process)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -16002,18 +15610,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFilesystem(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -16079,18 +15681,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &pce.ProcessContext.Process.FileEvent.FileFields)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -16157,18 +15753,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, false)
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return false
}
- result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &pce.ProcessContext.Process.FileEvent.FileFields)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -16349,18 +15939,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -16386,13 +15970,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -16420,18 +16000,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageName(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -16459,18 +16033,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -16498,18 +16066,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageVersion(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -16538,18 +16100,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -16575,13 +16131,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -16609,18 +16161,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, 0)
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return 0
}
- result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields))
- results = append(results, result)
- value = iterator.Next()
- }
+ return int(ev.FieldHandlers.ResolveRights(ev, &pce.ProcessContext.Process.FileEvent.FileFields))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -16686,18 +16232,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.IsNotKworker() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.IsNotKworker() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsUser(ev, &pce.ProcessContext.Process.FileEvent.FileFields)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -16943,18 +16483,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFilesystem(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -17020,18 +16554,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -17098,18 +16626,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, false)
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return false
}
- result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -17290,18 +16812,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -17327,13 +16843,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -17361,18 +16873,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageName(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -17400,18 +16906,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -17439,18 +16939,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolvePackageVersion(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -17479,18 +16973,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -17516,13 +17004,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -17550,18 +17034,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, 0)
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return 0
}
- result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields))
- results = append(results, result)
- value = iterator.Next()
- }
+ return int(ev.FieldHandlers.ResolveRights(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -17627,18 +17105,12 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- if !element.ProcessContext.Process.HasInterpreter() {
- results = append(results, "")
- value = iterator.Next()
- continue
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ if !pce.ProcessContext.Process.HasInterpreter() {
+ return ""
}
- result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
- results = append(results, result)
- value = iterator.Next()
- }
+ return ev.FieldHandlers.ResolveFileFieldsUser(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -17723,13 +17195,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessIsThread(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool {
+ return ev.FieldHandlers.ResolveProcessIsThread(ev, &pce.ProcessContext.Process)
+ })
ctx.BoolCache[field] = results
return results
}, Field: field,
@@ -17974,13 +17442,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveK8SUID(ev, &pce.ProcessContext.Process.UserSession)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -18005,13 +17469,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveK8SUsername(ev, &pce.ProcessContext.Process.UserSession)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
diff --git a/pkg/security/secl/model/accessors_windows.go b/pkg/security/secl/model/accessors_windows.go
index 2cc1f461e6a14..e1b81df60702e 100644
--- a/pkg/security/secl/model/accessors_windows.go
+++ b/pkg/security/secl/model/accessors_windows.go
@@ -805,13 +805,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessCmdLine(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveProcessCmdLine(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -866,13 +862,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &pce.ProcessContext.Process))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -960,13 +952,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -992,13 +980,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -1024,13 +1008,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -1056,13 +1036,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -1156,13 +1132,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveUser(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveUser(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
diff --git a/pkg/security/secl/model/process_cache_entry_unix.go b/pkg/security/secl/model/process_cache_entry_unix.go
index c99de2e92dc8e..ef0a2de2f2f61 100644
--- a/pkg/security/secl/model/process_cache_entry_unix.go
+++ b/pkg/security/secl/model/process_cache_entry_unix.go
@@ -100,6 +100,11 @@ func (pc *ProcessCacheEntry) SetExecParent(parent *ProcessCacheEntry) {
pc.IsExecExec = pc.Parent != nil && pc.Parent.IsExec
}
+// SetAsExec sets the entry as an Exec
+func (pc *ProcessCacheEntry) SetAsExec() {
+ pc.IsExec = true
+}
+
// Exec replace a process
func (pc *ProcessCacheEntry) Exec(entry *ProcessCacheEntry) {
entry.SetExecParent(pc)
diff --git a/pkg/security/secl/model/process_cache_entry_unix_test.go b/pkg/security/secl/model/process_cache_entry_unix_test.go
index b9cb6b913b0ee..11d76f666afde 100644
--- a/pkg/security/secl/model/process_cache_entry_unix_test.go
+++ b/pkg/security/secl/model/process_cache_entry_unix_test.go
@@ -83,3 +83,28 @@ func TestHasValidLineage(t *testing.T) {
assert.ErrorAs(t, err, &mn)
})
}
+
+func TestEntryEquals(t *testing.T) {
+ e1 := NewProcessCacheEntry(nil)
+ e1.Pid = 2
+ e2 := NewProcessCacheEntry(nil)
+ e2.Pid = 3
+ assert.True(t, e1.Equals(e2))
+
+ // different file
+ e1.FileEvent.Inode = 33
+ e2.FileEvent.Inode = 44
+ assert.False(t, e1.Equals(e2))
+
+ // same file
+ e2.FileEvent.Inode = 33
+ assert.True(t, e1.Equals(e2))
+
+ // different args
+ e2.ArgsEntry = &ArgsEntry{Values: []string{"aaa"}}
+ assert.False(t, e1.Equals(e2))
+
+ // same args
+ e1.ArgsEntry = &ArgsEntry{Values: []string{"aaa"}}
+ assert.True(t, e1.Equals(e2))
+}
diff --git a/pkg/security/secl/model/string_array_iter.go b/pkg/security/secl/model/string_array_iter.go
new file mode 100644
index 0000000000000..0664c2737b478
--- /dev/null
+++ b/pkg/security/secl/model/string_array_iter.go
@@ -0,0 +1,19 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package model holds model-related files
+package model
+
+import "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval"
+
+func newAncestorsIterator[T any](iter *ProcessAncestorsIterator, ctx *eval.Context, ev *Event, perIter func(ev *Event, pce *ProcessCacheEntry) T) []T {
+ results := make([]T, 0, ctx.CachedAncestorsCount)
+ for pce := iter.Front(ctx); pce != nil; pce = iter.Next() {
+ results = append(results, perIter(ev, pce))
+ }
+ ctx.CachedAncestorsCount = len(results)
+
+ return results
+}
diff --git a/pkg/security/seclwin/doc.go b/pkg/security/seclwin/doc.go
new file mode 100644
index 0000000000000..5f96f758a40ca
--- /dev/null
+++ b/pkg/security/seclwin/doc.go
@@ -0,0 +1,8 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021-present Datadog, Inc.
+
+// Package secl provides utilities related to Datadog Cloud Workload Security Policy Language.
+// This module has no API stability guarantees.
+package secl
diff --git a/pkg/security/seclwin/model/accessors_win.go b/pkg/security/seclwin/model/accessors_win.go
index c6e0e7b3bb650..174ed175a5976 100644
--- a/pkg/security/seclwin/model/accessors_win.go
+++ b/pkg/security/seclwin/model/accessors_win.go
@@ -803,13 +803,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveProcessCmdLine(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveProcessCmdLine(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -864,13 +860,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &pce.ProcessContext.Process))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -958,13 +950,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -990,13 +978,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -1022,13 +1006,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
@@ -1054,13 +1034,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent))
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int {
+ return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent))
+ })
ctx.IntCache[field] = results
return results
}, Field: field,
@@ -1154,13 +1130,9 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval
results = append(results, result)
return results
}
- value := iterator.Front(ctx)
- for value != nil {
- element := value
- result := ev.FieldHandlers.ResolveUser(ev, &element.ProcessContext.Process)
- results = append(results, result)
- value = iterator.Next()
- }
+ results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string {
+ return ev.FieldHandlers.ResolveUser(ev, &pce.ProcessContext.Process)
+ })
ctx.StringCache[field] = results
return results
}, Field: field,
diff --git a/pkg/security/seclwin/model/string_array_iter.go b/pkg/security/seclwin/model/string_array_iter.go
new file mode 100644
index 0000000000000..0664c2737b478
--- /dev/null
+++ b/pkg/security/seclwin/model/string_array_iter.go
@@ -0,0 +1,19 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package model holds model-related files
+package model
+
+import "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval"
+
+func newAncestorsIterator[T any](iter *ProcessAncestorsIterator, ctx *eval.Context, ev *Event, perIter func(ev *Event, pce *ProcessCacheEntry) T) []T {
+ results := make([]T, 0, ctx.CachedAncestorsCount)
+ for pce := iter.Front(ctx); pce != nil; pce = iter.Next() {
+ results = append(results, perIter(ev, pce))
+ }
+ ctx.CachedAncestorsCount = len(results)
+
+ return results
+}
diff --git a/pkg/security/tests/activity_dumps_common.go b/pkg/security/tests/activity_dumps_common.go
index 0de24f7165c10..0115e6765b265 100644
--- a/pkg/security/tests/activity_dumps_common.go
+++ b/pkg/security/tests/activity_dumps_common.go
@@ -27,7 +27,6 @@ const (
)
var (
- testActivityDumpDuration = time.Minute * 10
testActivityDumpLoadControllerPeriod = time.Second * 10
)
diff --git a/pkg/security/tests/main_test.go b/pkg/security/tests/main_test.go
index 01c46181fc453..c9be26e73f733 100644
--- a/pkg/security/tests/main_test.go
+++ b/pkg/security/tests/main_test.go
@@ -34,6 +34,8 @@ func TestMain(m *testing.M) {
}
var (
+ commonCfgDir string
+
logLevelStr string
logPatterns stringSlice
logTags stringSlice
diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go
index 62f9a99fac628..13c316a16b7b1 100644
--- a/pkg/security/tests/module_tester.go
+++ b/pkg/security/tests/module_tester.go
@@ -56,14 +56,19 @@ const (
Skip
)
const (
- getEventTimeout = 10 * time.Second
- filelessExecutionFilenamePrefix = "memfd:"
+ getEventTimeout = 10 * time.Second
)
var (
errSkipEvent = errors.New("skip event")
)
+const (
+ testActivityDumpDuration = time.Minute * 10
+)
+
+var testMod *testModule
+
func (s *stringSlice) String() string {
return strings.Join(*s, " ")
}
@@ -517,6 +522,29 @@ func (tm *testModule) Create(filename string) (string, unsafe.Pointer, error) {
return testFile, testPtr, err
}
+// NewTimeoutError returns a new timeout error with the metrics collected during the test
+func (tm *testModule) NewTimeoutError() ErrTimeout {
+ var msg strings.Builder
+
+ msg.WriteString("timeout, details: ")
+ tm.writePlatformSpecificTimeoutError(&msg)
+
+ events := tm.ruleEngine.StopEventCollector()
+ if len(events) != 0 {
+ msg.WriteString("\nevents evaluated:\n")
+
+ for _, event := range events {
+ msg.WriteString(fmt.Sprintf("%s (eval=%v) {\n", event.Type, event.EvalResult))
+ for field, value := range event.Fields {
+ msg.WriteString(fmt.Sprintf("\t%s=%v,\n", field, value))
+ }
+ msg.WriteString("}\n")
+ }
+ }
+
+ return ErrTimeout{msg.String()}
+}
+
func (tm *testModule) WaitSignal(tb testing.TB, action func() error, cb onRuleHandler) {
tb.Helper()
@@ -886,3 +914,18 @@ func jsonPathValidation(testMod *testModule, data []byte, fnc func(testMod *test
fnc(testMod, obj)
}
+
+type onRuleHandler func(*model.Event, *rules.Rule)
+type onProbeEventHandler func(*model.Event)
+type onCustomSendEventHandler func(*rules.Rule, *events.CustomEvent)
+type onSendEventHandler func(*rules.Rule, *model.Event)
+type onDiscarderPushedHandler func(event eval.Event, field eval.Field, eventType eval.EventType) bool
+
+type eventHandlers struct {
+ sync.RWMutex
+ onRuleMatch onRuleHandler
+ onProbeEvent onProbeEventHandler
+ onCustomSendEvent onCustomSendEventHandler
+ onSendEvent onSendEventHandler
+ onDiscarderPushed onDiscarderPushedHandler
+}
diff --git a/pkg/security/tests/module_tester_linux.go b/pkg/security/tests/module_tester_linux.go
index 86939f3db10cc..485a37422841a 100644
--- a/pkg/security/tests/module_tester_linux.go
+++ b/pkg/security/tests/module_tester_linux.go
@@ -38,14 +38,12 @@ import (
"github.com/DataDog/datadog-agent/pkg/eventmonitor"
secconfig "github.com/DataDog/datadog-agent/pkg/security/config"
"github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel"
- "github.com/DataDog/datadog-agent/pkg/security/events"
"github.com/DataDog/datadog-agent/pkg/security/module"
sprobe "github.com/DataDog/datadog-agent/pkg/security/probe"
"github.com/DataDog/datadog-agent/pkg/security/proto/api"
cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model"
rulesmodule "github.com/DataDog/datadog-agent/pkg/security/rules"
"github.com/DataDog/datadog-agent/pkg/security/rules/bundled"
- "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval"
"github.com/DataDog/datadog-agent/pkg/security/secl/model"
"github.com/DataDog/datadog-agent/pkg/security/secl/rules"
activity_tree "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree"
@@ -62,6 +60,10 @@ var (
logger seelog.LoggerInterface
)
+const (
+ filelessExecutionFilenamePrefix = "memfd:"
+)
+
const testConfig = `---
log_level: DEBUG
system_probe_config:
@@ -71,7 +73,6 @@ system_probe_config:
event_monitoring_config:
socket: /tmp/test-event-monitor.sock
- remote_tagger: false
custom_sensitive_words:
- "*custom*"
network:
@@ -229,24 +230,6 @@ type testModule struct {
msgSender *fakeMsgSender
}
-var testMod *testModule
-var commonCfgDir string
-
-type onRuleHandler func(*model.Event, *rules.Rule)
-type onProbeEventHandler func(*model.Event)
-type onCustomSendEventHandler func(*rules.Rule, *events.CustomEvent)
-type onSendEventHandler func(*rules.Rule, *model.Event)
-type onDiscarderPushedHandler func(event eval.Event, field eval.Field, eventType eval.EventType) bool
-
-type eventHandlers struct {
- sync.RWMutex
- onRuleMatch onRuleHandler
- onProbeEvent onProbeEventHandler
- onCustomSendEvent onCustomSendEventHandler
- onSendEvent onSendEventHandler
- onDiscarderPushed onDiscarderPushedHandler
-}
-
//nolint:deadcode,unused
func getInode(tb testing.TB, path string) uint64 {
fileInfo, err := os.Lstat(path)
@@ -1620,28 +1603,9 @@ func (tm *testModule) GetADSelector(dumpID *activityDumpIdentifier) (*cgroupMode
return &selector, err
}
-// NewTimeoutError returns a new timeout error with the metrics collected during the test
-func (tm *testModule) NewTimeoutError() ErrTimeout {
- var msg strings.Builder
-
- msg.WriteString("timeout, details: ")
- msg.WriteString(GetEBPFStatusMetrics(tm.probe))
- msg.WriteString(spew.Sdump(ebpftelemetry.GetProbeStats()))
-
- events := tm.ruleEngine.StopEventCollector()
- if len(events) != 0 {
- msg.WriteString("\nevents evaluated:\n")
-
- for _, event := range events {
- msg.WriteString(fmt.Sprintf("%s (eval=%v) {\n", event.Type, event.EvalResult))
- for field, value := range event.Fields {
- msg.WriteString(fmt.Sprintf("\t%s=%v,\n", field, value))
- }
- msg.WriteString("}\n")
- }
- }
-
- return ErrTimeout{msg.String()}
+func (tm *testModule) writePlatformSpecificTimeoutError(b *strings.Builder) {
+ b.WriteString(GetEBPFStatusMetrics(tm.probe))
+ b.WriteString(spew.Sdump(ebpftelemetry.GetProbeStats()))
}
func (tm *testModule) WaitSignals(tb testing.TB, action func() error, cbs ...func(event *model.Event, rule *rules.Rule) error) {
diff --git a/pkg/security/tests/module_tester_windows.go b/pkg/security/tests/module_tester_windows.go
index 052350b7be636..e25537626a9cf 100644
--- a/pkg/security/tests/module_tester_windows.go
+++ b/pkg/security/tests/module_tester_windows.go
@@ -14,37 +14,26 @@ import (
"strings"
"sync"
"testing"
- "time"
"github.com/hashicorp/go-multierror"
"github.com/DataDog/datadog-agent/pkg/eventmonitor"
secconfig "github.com/DataDog/datadog-agent/pkg/security/config"
- "github.com/DataDog/datadog-agent/pkg/security/events"
"github.com/DataDog/datadog-agent/pkg/security/module"
sprobe "github.com/DataDog/datadog-agent/pkg/security/probe"
rulesmodule "github.com/DataDog/datadog-agent/pkg/security/rules"
- "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval"
"github.com/DataDog/datadog-agent/pkg/security/secl/model"
"github.com/DataDog/datadog-agent/pkg/security/secl/rules"
"github.com/DataDog/datadog-agent/pkg/security/tests/statsdclient"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
-var (
- testActivityDumpDuration = time.Second * 30
- testActivityDumpLoadControllerPeriod = time.Second * 10
-)
-
const testConfig = `---
log_level: DEBUG
event_monitoring_config:
- remote_tagger: false
custom_sensitive_words:
- "*custom*"
- network:
- enabled: true
flush_discarder_window: 0
{{if .DisableFilters}}
enable_kernel_filters: false
@@ -55,8 +44,6 @@ event_monitoring_config:
{{if .DisableDiscarders}}
enable_discarders: false
{{end}}
- erpc_dentry_resolution_enabled: {{ .ErpcDentryResolutionEnabled }}
- map_dentry_resolution_enabled: {{ .MapDentryResolutionEnabled }}
envs_with_value:
{{range .EnvsWithValue}}
- {{.}}
@@ -75,49 +62,6 @@ runtime_security_config:
sbom:
enabled: {{ .SBOMEnabled }}
fim_enabled: {{ .FIMEnabled }}
- activity_dump:
- enabled: {{ .EnableActivityDump }}
-{{if .EnableActivityDump}}
- rate_limiter: {{ .ActivityDumpRateLimiter }}
- tag_rules:
- enabled: {{ .ActivityDumpTagRules }}
- dump_duration: {{ .ActivityDumpDuration }}
- {{if .ActivityDumpLoadControllerPeriod }}
- load_controller_period: {{ .ActivityDumpLoadControllerPeriod }}
- {{end}}
- {{if .ActivityDumpCleanupPeriod }}
- cleanup_period: {{ .ActivityDumpCleanupPeriod }}
- {{end}}
- {{if .ActivityDumpLoadControllerTimeout }}
- min_timeout: {{ .ActivityDumpLoadControllerTimeout }}
- {{end}}
- traced_cgroups_count: {{ .ActivityDumpTracedCgroupsCount }}
- cgroup_differentiate_args: {{ .ActivityDumpCgroupDifferentiateArgs }}
- auto_suppression:
- enabled: {{ .ActivityDumpAutoSuppressionEnabled }}
- traced_event_types: {{range .ActivityDumpTracedEventTypes}}
- - {{.}}
- {{end}}
- local_storage:
- output_directory: {{ .ActivityDumpLocalStorageDirectory }}
- compression: {{ .ActivityDumpLocalStorageCompression }}
- formats: {{range .ActivityDumpLocalStorageFormats}}
- - {{.}}
- {{end}}
-{{end}}
- security_profile:
- enabled: {{ .EnableSecurityProfile }}
-{{if .EnableSecurityProfile}}
- dir: {{ .SecurityProfileDir }}
- watch_dir: {{ .SecurityProfileWatchDir }}
- anomaly_detection:
- enabled: true
- default_minimum_stable_period: {{.AnomalyDetectionDefaultMinimumStablePeriod}}
- minimum_stable_period:
- exec: {{.AnomalyDetectionMinimumStablePeriodExec}}
- dns: {{.AnomalyDetectionMinimumStablePeriodDNS}}
- workload_warmup_period: {{.AnomalyDetectionWarmupPeriod}}
-{{end}}
self_test:
enabled: false
@@ -132,8 +76,6 @@ runtime_security_config:
{{range .LogTags}}
- {{.}}
{{end}}
- ebpfless:
- enabled: {{.EBPFLessEnabled}}
enforcement:
exclude_binaries:
- {{ .EnforcementExcludeBinary }}
@@ -148,21 +90,6 @@ runtime_security_config:
period: {{.EnforcementDisarmerExecutablePeriod}}
`
-type onRuleHandler func(*model.Event, *rules.Rule)
-type onProbeEventHandler func(*model.Event)
-type onCustomSendEventHandler func(*rules.Rule, *events.CustomEvent)
-type onSendEventHandler func(*rules.Rule, *model.Event)
-type onDiscarderPushedHandler func(event eval.Event, field eval.Field, eventType eval.EventType) bool
-
-type eventHandlers struct {
- sync.RWMutex
- onRuleMatch onRuleHandler
- onProbeEvent onProbeEventHandler
- onCustomSendEvent onCustomSendEventHandler
- onSendEvent onSendEventHandler
- onDiscarderPushed onDiscarderPushedHandler
-}
-
type testModule struct {
sync.RWMutex
secconfig *secconfig.Config
@@ -179,9 +106,6 @@ type testModule struct {
ruleEngine *rulesmodule.RuleEngine
}
-var testMod *testModule
-var commonCfgDir string
-
func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs []*rules.RuleDefinition, fopts ...optFunc) (*testModule, error) {
var opts tmOpts
@@ -229,7 +153,8 @@ func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs []
emopts := eventmonitor.Opts{
StatsdClient: statsdClient,
ProbeOpts: sprobe.Opts{
- StatsdClient: statsdClient,
+ StatsdClient: statsdClient,
+ DontDiscardRuntime: true,
},
}
testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, nil)
@@ -277,7 +202,9 @@ func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs []
testMod.RegisterRuleEventHandler(func(e *model.Event, r *rules.Rule) {
opts.staticOpts.snapshotRuleMatchHandler(testMod, e, r)
})
- defer testMod.RegisterRuleEventHandler(nil)
+ t.Cleanup(func() {
+ testMod.RegisterRuleEventHandler(nil)
+ })
}
if err := testMod.eventMonitor.Start(); err != nil {
@@ -297,24 +224,5 @@ func (tm *testModule) Close() {
tm.eventMonitor.Close()
}
-// NewTimeoutError returns a new timeout error with the metrics collected during the test
-func (tm *testModule) NewTimeoutError() ErrTimeout {
- var msg strings.Builder
-
- msg.WriteString("timeout, details: ")
-
- events := tm.ruleEngine.StopEventCollector()
- if len(events) != 0 {
- msg.WriteString("\nevents evaluated:\n")
-
- for _, event := range events {
- msg.WriteString(fmt.Sprintf("%s (eval=%v) {\n", event.Type, event.EvalResult))
- for field, value := range event.Fields {
- msg.WriteString(fmt.Sprintf("\t%s=%v,\n", field, value))
- }
- msg.WriteString("}\n")
- }
- }
-
- return ErrTimeout{msg.String()}
+func (tm *testModule) writePlatformSpecificTimeoutError(b *strings.Builder) {
}
diff --git a/pkg/security/tests/snapshot_test.go b/pkg/security/tests/snapshot_test.go
index fe7707a5fe707..3046d7d97874a 100644
--- a/pkg/security/tests/snapshot_test.go
+++ b/pkg/security/tests/snapshot_test.go
@@ -35,6 +35,17 @@ func TestSnapshot(t *testing.T) {
snapshotRuleMatchHandler: func(testMod *testModule, e *model.Event, r *rules.Rule) {
assertTriggeredRule(t, r, "test_rule_snapshot_host")
testMod.validateExecSchema(t, e)
+ validateProcessContext(t, e)
+
+ // validate that pid 1 is reported as an exec
+ ancestor := e.ProcessContext.Ancestor
+ for ancestor != nil {
+ if ancestor.Pid == 1 && !ancestor.IsExec {
+ t.Errorf("pid1 should be reported as an Exec: %+v", e)
+ }
+ ancestor = ancestor.Ancestor
+ }
+
gotEvent.Store(true)
},
}))
@@ -80,6 +91,7 @@ func TestSnapshot(t *testing.T) {
snapshotRuleMatchHandler: func(testMod *testModule, e *model.Event, r *rules.Rule) {
assertTriggeredRule(t, r, "test_rule_snapshot_container")
testMod.validateExecSchema(t, e)
+ validateProcessContext(t, e)
gotEvent.Store(true)
},
}))
diff --git a/pkg/security/utils/cidr.go b/pkg/security/utils/cidr.go
new file mode 100644
index 0000000000000..5897500b983e8
--- /dev/null
+++ b/pkg/security/utils/cidr.go
@@ -0,0 +1,128 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package utils holds utils related files
+package utils
+
+import (
+ "fmt"
+ "net"
+ "slices"
+)
+
+type byteMaskFilter struct {
+ mask byte
+ ip byte
+ next []*byteMaskFilter
+}
+
+// CIDRSet defines a set of CIDRs
+type CIDRSet struct {
+ cidrList []string
+ cidrGraph []*byteMaskFilter
+}
+
+func appendByteFilter(filter *[]*byteMaskFilter, iter int, ipnet *net.IPNet) {
+ if iter > len(ipnet.IP) {
+ // we already append all needed filters
+ return
+ }
+
+ if ipnet.Mask[iter] == 0 {
+ // filter already pushed
+ return
+ }
+
+ last := false
+ if ipnet.Mask[iter] != 0xFF ||
+ (ipnet.Mask[iter] == 0xFF && iter+1 < len(ipnet.IP) && ipnet.Mask[iter+1] == 0) {
+ // last filter
+ last = true
+ }
+
+ // check filter is not already present:
+ for _, f := range *filter {
+ if f.mask == ipnet.Mask[iter] && f.ip == ipnet.IP[iter] {
+ // found
+ appendByteFilter(&f.next, iter+1, ipnet)
+ return
+ }
+ }
+ newBMF := &byteMaskFilter{
+ mask: ipnet.Mask[iter],
+ ip: ipnet.IP[iter],
+ next: []*byteMaskFilter{},
+ }
+ *filter = append(*filter, newBMF)
+ if last {
+ return
+ }
+ appendByteFilter(&newBMF.next, iter+1, ipnet)
+}
+
+// AppendCIDR appends a CIDR to the set
+func (cs *CIDRSet) AppendCIDR(cidr string) error {
+ if slices.Contains(cs.cidrList, cidr) {
+ return nil // already present
+ }
+
+ _, ipnet, err := net.ParseCIDR(cidr)
+ if err != nil {
+ return err
+ }
+
+ appendByteFilter(&cs.cidrGraph, 0, ipnet)
+ cs.cidrList = append(cs.cidrList, cidr)
+ return nil
+}
+
+func debugFilter(filter *byteMaskFilter, prefix string) {
+ fmt.Printf("%s . ip/mask: %d/%d\n", prefix, filter.ip, filter.mask)
+ for _, f := range filter.next {
+ debugFilter(f, prefix+" ")
+ }
+}
+
+// Debug prints on stdout the content of the CIDR set
+func (cs *CIDRSet) Debug() {
+ fmt.Printf("List of %d CIDR:\n", len(cs.cidrList))
+ for _, cidr := range cs.cidrList {
+ fmt.Printf(" - %s\n", cidr)
+ }
+ fmt.Println("Filter graph:")
+ for _, f := range cs.cidrGraph {
+ debugFilter(f, " ")
+ }
+}
+
+func matchByteMask(maskFilter *[]*byteMaskFilter, iter int, ip []byte) bool {
+ if len(*maskFilter) == 0 {
+ // no more filters
+ return true
+ }
+
+ for _, f := range *maskFilter {
+ if f.ip == (ip[iter] & f.mask) {
+ // match
+ return matchByteMask(&f.next, iter+1, ip)
+ }
+ }
+ // no match
+ return false
+}
+
+// MatchIP returns true if the given IP match the CIDR set
+func (cs *CIDRSet) MatchIP(ipstring string) bool {
+ ipnet := net.ParseIP(ipstring)
+ if ipnet == nil {
+ return false
+ }
+ if ipv4 := ipnet.To4(); ipv4 != nil {
+ return matchByteMask(&cs.cidrGraph, 0, ipv4)
+ } else if ipv6 := ipnet.To16(); ipv6 != nil {
+ return matchByteMask(&cs.cidrGraph, 0, ipv6)
+ }
+ return false
+}
diff --git a/pkg/security/utils/cidr_test.go b/pkg/security/utils/cidr_test.go
new file mode 100644
index 0000000000000..34fc3a58da0d7
--- /dev/null
+++ b/pkg/security/utils/cidr_test.go
@@ -0,0 +1,106 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux || windows
+
+// Package utils holds utils related files
+package utils
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestIsIPMatch(t *testing.T) {
+ // DefaultPrivateIPCIDRs is a list of private IP CIDRs that are used to determine if an IP is private or not.
+ var DefaultPrivateIPCIDRs = []string{
+ // IETF RPC 1918
+ "10.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ // IETF RFC 5735
+ "0.0.0.0/8",
+ "127.0.0.0/8",
+ "169.254.0.0/16",
+ "192.0.0.0/24",
+ "192.0.2.0/24",
+ "192.88.99.0/24",
+ "198.18.0.0/15",
+ "198.51.100.0/24",
+ "203.0.113.0/24",
+ "224.0.0.0/4",
+ "240.0.0.0/4",
+ // IETF RFC 6598
+ "100.64.0.0/10",
+ // // IETF RFC 4193
+ "fc00::/7",
+ }
+
+ cset := CIDRSet{}
+ for _, cidr := range DefaultPrivateIPCIDRs {
+ if err := cset.AppendCIDR(cidr); err != nil {
+ t.Fatalf("failed to append CIDR %s: %v", cidr, err)
+ }
+ }
+
+ // cset.Debug()
+
+ testCases := []struct {
+ name string
+ ip string
+ expected bool
+ }{
+ {
+ name: "dont match 1",
+ ip: "11.1.1.1",
+ expected: false,
+ },
+
+ {
+ name: "dont match 2",
+ ip: "172.48.1.1",
+ expected: false,
+ },
+
+ {
+ name: "dont match 3",
+ ip: "192.167.1.1",
+ expected: false,
+ },
+
+ {
+ name: "match in 24-bit block",
+ ip: "10.11.11.11",
+ expected: true,
+ },
+ {
+ name: "match in 20-bit block",
+ ip: "172.24.11.11",
+ expected: true,
+ },
+ {
+ name: "match in 16-bit block",
+ ip: "192.168.11.11",
+ expected: true,
+ },
+ {
+ name: "IPv6 ULA",
+ ip: "fdf8:b35f:91b1::11",
+ expected: true,
+ },
+ {
+ name: "IPv6 Global",
+ ip: "2001:0:0eab:dead::a0:abcd:4e",
+ expected: false,
+ },
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.name, func(t *testing.T) {
+ assert.Equal(t, testCase.expected, cset.MatchIP(testCase.ip))
+ })
+ }
+}
diff --git a/pkg/serializer/internal/metrics/origin_mapping.go b/pkg/serializer/internal/metrics/origin_mapping.go
index 8e362c78c99ad..ddf4673925a23 100644
--- a/pkg/serializer/internal/metrics/origin_mapping.go
+++ b/pkg/serializer/internal/metrics/origin_mapping.go
@@ -296,7 +296,8 @@ func metricSourceToOriginCategory(ms metrics.MetricSource) int32 {
metrics.MetricSourceWmiCheck,
metrics.MetricSourceYarn,
metrics.MetricSourceZk,
- metrics.MetricSourceAwsNeuron:
+ metrics.MetricSourceAwsNeuron,
+ metrics.MetricSourceMilvus:
return 11 // integrationMetrics
default:
return 0
@@ -868,6 +869,8 @@ func metricSourceToOriginService(ms metrics.MetricSource) int32 {
return 418
case metrics.MetricSourceTibcoEMS:
return 419
+ case metrics.MetricSourceMilvus:
+ return 425
default:
return 0
}
diff --git a/pkg/serverless/daemon/routes_test.go b/pkg/serverless/daemon/routes_test.go
index 19d55ab3cd098..e0f2fea43a637 100644
--- a/pkg/serverless/daemon/routes_test.go
+++ b/pkg/serverless/daemon/routes_test.go
@@ -20,7 +20,7 @@ import (
"github.com/cihub/seelog"
"github.com/stretchr/testify/assert"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/datadog-agent/pkg/serverless/invocationlifecycle"
"github.com/DataDog/datadog-agent/pkg/serverless/metrics"
@@ -450,7 +450,7 @@ func startAgents() *Daemon {
ma := &metrics.ServerlessMetricAgent{
SketchesBucketOffset: time.Second * 10,
- Tagger: nooptagger.NewTaggerClient(),
+ Tagger: nooptagger.NewComponent(),
}
ma.Start(FlushTimeout, &metrics.MetricConfig{}, &metrics.MetricDogStatsD{})
d.SetStatsdServer(ma)
diff --git a/pkg/serverless/logs/scheduler.go b/pkg/serverless/logs/scheduler.go
index e09cc1cf00736..b5be31485d14f 100644
--- a/pkg/serverless/logs/scheduler.go
+++ b/pkg/serverless/logs/scheduler.go
@@ -6,7 +6,7 @@
package logs
import (
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent"
"github.com/DataDog/datadog-agent/comp/logs/agent/agentimpl"
"github.com/DataDog/datadog-agent/comp/logs/agent/config"
diff --git a/pkg/serverless/metrics/metric.go b/pkg/serverless/metrics/metric.go
index a3f3456a94ef7..75c99f9dbf91e 100644
--- a/pkg/serverless/metrics/metric.go
+++ b/pkg/serverless/metrics/metric.go
@@ -10,7 +10,7 @@ import (
"strings"
"time"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server"
"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/config/model"
diff --git a/pkg/serverless/metrics/metric_test.go b/pkg/serverless/metrics/metric_test.go
index ba31842f93590..dc3df4790609c 100644
--- a/pkg/serverless/metrics/metric_test.go
+++ b/pkg/serverless/metrics/metric_test.go
@@ -21,7 +21,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl"
+ nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop"
dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server"
"github.com/DataDog/datadog-agent/pkg/aggregator"
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
@@ -43,7 +43,7 @@ func TestStartDoesNotBlock(t *testing.T) {
pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil)
metricAgent := &ServerlessMetricAgent{
SketchesBucketOffset: time.Second * 10,
- Tagger: nooptagger.NewTaggerClient(),
+ Tagger: nooptagger.NewComponent(),
}
defer metricAgent.Stop()
metricAgent.Start(10*time.Second, &MetricConfig{}, &MetricDogStatsD{})
@@ -66,7 +66,7 @@ func (m *InvalidMetricConfigMocked) GetMultipleEndpoints() (map[string][]string,
func TestStartInvalidConfig(t *testing.T) {
metricAgent := &ServerlessMetricAgent{
SketchesBucketOffset: time.Second * 10,
- Tagger: nooptagger.NewTaggerClient(),
+ Tagger: nooptagger.NewComponent(),
}
defer metricAgent.Stop()
metricAgent.Start(1*time.Second, &InvalidMetricConfigMocked{}, &MetricDogStatsD{})
@@ -84,7 +84,7 @@ func (m *MetricDogStatsDMocked) NewServer(_ aggregator.Demultiplexer) (dogstatsd
func TestStartInvalidDogStatsD(t *testing.T) {
metricAgent := &ServerlessMetricAgent{
SketchesBucketOffset: time.Second * 10,
- Tagger: nooptagger.NewTaggerClient(),
+ Tagger: nooptagger.NewComponent(),
}
defer metricAgent.Stop()
metricAgent.Start(1*time.Second, &MetricConfig{}, &MetricDogStatsDMocked{})
@@ -101,7 +101,7 @@ func TestStartWithProxy(t *testing.T) {
metricAgent := &ServerlessMetricAgent{
SketchesBucketOffset: time.Second * 10,
- Tagger: nooptagger.NewTaggerClient(),
+ Tagger: nooptagger.NewComponent(),
}
defer metricAgent.Stop()
metricAgent.Start(10*time.Second, &MetricConfig{}, &MetricDogStatsD{})
@@ -121,7 +121,7 @@ func TestRaceFlushVersusAddSample(t *testing.T) {
}
metricAgent := &ServerlessMetricAgent{
SketchesBucketOffset: time.Second * 10,
- Tagger: nooptagger.NewTaggerClient(),
+ Tagger: nooptagger.NewComponent(),
}
defer metricAgent.Stop()
metricAgent.Start(10*time.Second, &ValidMetricConfigMocked{}, &MetricDogStatsD{})
@@ -216,7 +216,7 @@ func TestRaceFlushVersusParsePacket(t *testing.T) {
require.NoError(t, err)
pkgconfigsetup.Datadog().SetDefault("dogstatsd_port", port)
- demux := aggregator.InitAndStartServerlessDemultiplexer(nil, time.Second*1000, nooptagger.NewTaggerClient())
+ demux := aggregator.InitAndStartServerlessDemultiplexer(nil, time.Second*1000, nooptagger.NewComponent())
s, err := dogstatsdServer.NewServerlessServer(demux)
require.NoError(t, err, "cannot start DSD")
diff --git a/pkg/serverless/trace/trace.go b/pkg/serverless/trace/trace.go
index 0227897547bee..0ace7457d6f27 100644
--- a/pkg/serverless/trace/trace.go
+++ b/pkg/serverless/trace/trace.go
@@ -15,7 +15,7 @@ import (
"github.com/DataDog/datadog-agent/cmd/serverless-init/cloudservice"
compcorecfg "github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
zstd "github.com/DataDog/datadog-agent/comp/trace/compression/impl-zstd"
comptracecfg "github.com/DataDog/datadog-agent/comp/trace/config"
"github.com/DataDog/datadog-agent/pkg/config/model"
diff --git a/pkg/trace/go.mod b/pkg/trace/go.mod
index 0e9eccb70306c..153ebf8b9f1e3 100644
--- a/pkg/trace/go.mod
+++ b/pkg/trace/go.mod
@@ -36,22 +36,27 @@ require (
github.com/stretchr/testify v1.9.0
github.com/tinylib/msgp v1.1.8
github.com/vmihailenco/msgpack/v4 v4.3.12
- go.opentelemetry.io/collector/component v0.111.0
- go.opentelemetry.io/collector/consumer v0.111.0
- go.opentelemetry.io/collector/pdata v1.17.0
- go.opentelemetry.io/collector/processor v0.111.0
- go.opentelemetry.io/collector/semconv v0.111.0
+ go.opentelemetry.io/collector/component v0.113.0
+ go.opentelemetry.io/collector/consumer v0.113.0
+ go.opentelemetry.io/collector/pdata v1.19.0
+ go.opentelemetry.io/collector/processor/processortest v0.113.0
+ go.opentelemetry.io/collector/semconv v0.113.0
go.opentelemetry.io/otel v1.31.0
go.opentelemetry.io/otel/metric v1.31.0
go.uber.org/atomic v1.11.0
golang.org/x/sys v0.26.0
golang.org/x/time v0.7.0
google.golang.org/grpc v1.67.1
- google.golang.org/protobuf v1.34.2
+ google.golang.org/protobuf v1.35.1
gopkg.in/ini.v1 v1.67.0
k8s.io/apimachinery v0.25.5
)
+require (
+ go.opentelemetry.io/collector/consumer/consumererror v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor v0.113.0 // indirect
+)
+
require (
github.com/DataDog/go-sqllexer v0.0.16 // indirect
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
@@ -86,15 +91,14 @@ require (
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/vmihailenco/tagparser v0.1.2 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- go.opentelemetry.io/collector/component/componentstatus v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect
- go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect
- go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.111.0 // indirect
- go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect
+ go.opentelemetry.io/collector/component/componentstatus v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumertest v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/pprofile v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/testdata v0.113.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.113.0 // indirect
+ go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 // indirect
go.opentelemetry.io/otel/sdk v1.31.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
go.opentelemetry.io/otel/trace v1.31.0 // indirect
diff --git a/pkg/trace/go.sum b/pkg/trace/go.sum
index bebbccd8d58e1..9caaa1ba3bf3d 100644
--- a/pkg/trace/go.sum
+++ b/pkg/trace/go.sum
@@ -190,14 +190,14 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/collector v0.111.0 h1:D3LJTYrrK2ac94E2PXPSbVkArqxbklbCLsE4MAJQdRo=
go.opentelemetry.io/collector v0.111.0/go.mod h1:eZi4Z1DmHy+sVqbUI8dZNvhrH7HZIlX+0AKorOtv6nE=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
go.opentelemetry.io/collector/component/componentprofiles v0.111.0 h1:yT3Sa833G9GMiXkAOuYi30afd/5vTmDQpZo6+X/XjXM=
go.opentelemetry.io/collector/component/componentprofiles v0.111.0/go.mod h1:v9cm6ndumcbCSqZDBs0vRReRW7KSYax1RZVhs/CiZCo=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0 h1:iNlUi+vDo7OqQwg4UOjfqqY3Xram4SkdcSZAyxt6QfE=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0/go.mod h1:NF/hVnP0kcsEqV9x14yMxN9GoehuOaVgMpO+s2KE+28=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM=
go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k=
go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0 h1:KH0ABOBfSPp5XZtHkoXeI9wKoOD9B0eN6TDo08SwN/c=
@@ -212,12 +212,14 @@ go.opentelemetry.io/collector/connector v0.111.0 h1:dOaJRO27LyX4ZnkZA51namo2V5id
go.opentelemetry.io/collector/connector v0.111.0/go.mod h1:gPwxA1SK+uraSTpX20MG/cNc+axhkBm8+B6z6hh6hYg=
go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 h1:tJ4+hcWRhknw+cRw6d6dI4CyX3/puqnd1Rg9+mWdwHU=
go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0/go.mod h1:LdfE8hNYcEb+fI5kZp4w3ZGlTLFAmvHAPtTZxS6TZ38=
-go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
-go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
+go.opentelemetry.io/collector/consumer v0.113.0 h1:KJSiK5vSIY9dgPxwKfQ3gOgKtQsqc+7IB7mGhUAL5c8=
+go.opentelemetry.io/collector/consumer v0.113.0/go.mod h1:zHMlXYFaJlZoLCBR6UwWoyXZ/adcO1u2ydqUal3VmYU=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0 h1:Hd2N7n9RKbnKRaVrdw6fPBoQko5zZIgCxwVxkL6SAIE=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0/go.mod h1:o0MAGFdzcr7LFTUQ6iivPPhbVmn2ZVIYm3FPXk2+JUo=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 h1:RftAcQUY5UOfbEK4s16jnORqTx16y9+PxA1lQwt98cQ=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0/go.mod h1:ZuHrQ4pWguh6dw0DgTfcUtdY/T+cnOJJNP6LMbm5Y5A=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0 h1:ua2AjNx3DUA8qElXNkggB4w3VDL/rBKBvryOQkhumH8=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0/go.mod h1:vK8o4ZTZSiG3rVyqxZcCNmT/cvEfx34ig7V65L9+6Rg=
go.opentelemetry.io/collector/exporter v0.111.0 h1:NpiP6xXGOmSi59RlB5gGTB+PtCLldVeK3vCQBJPW0sU=
go.opentelemetry.io/collector/exporter v0.111.0/go.mod h1:FjO80zGWZjqXil8vM1MS8gyxxzZ29WmChTNV2y9xjHo=
go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 h1:fpIRPzqsaEtbVip/wsU6h/GMGISo7UjiiYV61MOMEpQ=
@@ -236,24 +238,26 @@ go.opentelemetry.io/collector/otelcol v0.111.0 h1:RcS1/BDsEBGdI4YjosdElxYwsA2tTt
go.opentelemetry.io/collector/otelcol v0.111.0/go.mod h1:B/ri/CwsW7zeLXkCcB3XtarxjJ80eIC+z8guGhFFpis=
go.opentelemetry.io/collector/otelcol/otelcoltest v0.111.0 h1:kiXvbIR1K8Tcv10ffaA9MvcPoGpm6uitaXzfhDZnV3o=
go.opentelemetry.io/collector/otelcol/otelcoltest v0.111.0/go.mod h1:7jwDuhMkglGVSyJT6CQ1vE7A6fjYTvbap7/QVl3P8kQ=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
-go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
-go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
-go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g=
-go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0 h1:VRf4p0VhfuaR+Epy/nMIlu/9t39WU9CUgHVUvpuGxfU=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0/go.mod h1:5aDejksdXh5PdJN/OhpzATGT3kbNL0RMmw2Q0Q6E/o0=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0 h1:vRfn85jicO2F4eOTgsWtzmU/K3E/uZUtM1HEefvvJD8=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0/go.mod h1:sR+6eR+YEJhYZu9StbqzeWcCmHpfBAgX/qjP82HY9Gw=
+go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uFxcm2VO+vhad/eE=
+go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg=
+go.opentelemetry.io/collector/processor v0.113.0 h1:BQI6MsKtiCG9HT/nmiRXTKP6SZFrjFKVfM6pTQfbc0k=
+go.opentelemetry.io/collector/processor v0.113.0/go.mod h1:oX91zMI8ZkoaYSUfUYflHiMiiBJPKtODNBUCrETLLd8=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE=
+go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA=
+go.opentelemetry.io/collector/processor/processortest v0.113.0/go.mod h1:tNg04r5KlpVx774jSC8U/oYst1eb7WIiz+0AjXKJ0Uw=
go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6tq/2qvbMIKlhG3s=
go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g=
go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw=
go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM=
-go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
-go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
go.opentelemetry.io/collector/service v0.111.0 h1:6yGjjbZvlYbir+vzi/9ACF965m8i96ScPTjpVvki3ms=
go.opentelemetry.io/collector/service v0.111.0/go.mod h1:tti8TAosPuRj51/bbrSvf6OIJoSyTkywEvTdY/fAuwY=
go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c=
@@ -400,8 +404,8 @@ google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
diff --git a/pkg/trace/stats/oteltest/go.mod b/pkg/trace/stats/oteltest/go.mod
index 8cd7813ca1f2a..9a79b9f63e2d3 100644
--- a/pkg/trace/stats/oteltest/go.mod
+++ b/pkg/trace/stats/oteltest/go.mod
@@ -10,11 +10,11 @@ require (
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0
github.com/google/go-cmp v0.6.0
github.com/stretchr/testify v1.9.0
- go.opentelemetry.io/collector/component v0.111.0
- go.opentelemetry.io/collector/pdata v1.17.0
- go.opentelemetry.io/collector/semconv v0.111.0
+ go.opentelemetry.io/collector/component v0.113.0
+ go.opentelemetry.io/collector/pdata v1.19.0
+ go.opentelemetry.io/collector/semconv v0.113.0
go.opentelemetry.io/otel/metric v1.31.0
- google.golang.org/protobuf v1.34.2
+ google.golang.org/protobuf v1.35.1
)
require (
@@ -63,7 +63,7 @@ require (
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
go.opentelemetry.io/otel v1.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.31.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
diff --git a/pkg/trace/stats/oteltest/go.sum b/pkg/trace/stats/oteltest/go.sum
index bea707d5be0ce..ba8958d6623f2 100644
--- a/pkg/trace/stats/oteltest/go.sum
+++ b/pkg/trace/stats/oteltest/go.sum
@@ -134,34 +134,34 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
-go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
-go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
-go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
-go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
-go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g=
-go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ=
-go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
-go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0 h1:iNlUi+vDo7OqQwg4UOjfqqY3Xram4SkdcSZAyxt6QfE=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0/go.mod h1:NF/hVnP0kcsEqV9x14yMxN9GoehuOaVgMpO+s2KE+28=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/consumer v0.113.0 h1:KJSiK5vSIY9dgPxwKfQ3gOgKtQsqc+7IB7mGhUAL5c8=
+go.opentelemetry.io/collector/consumer v0.113.0/go.mod h1:zHMlXYFaJlZoLCBR6UwWoyXZ/adcO1u2ydqUal3VmYU=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 h1:RftAcQUY5UOfbEK4s16jnORqTx16y9+PxA1lQwt98cQ=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0/go.mod h1:ZuHrQ4pWguh6dw0DgTfcUtdY/T+cnOJJNP6LMbm5Y5A=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0 h1:ua2AjNx3DUA8qElXNkggB4w3VDL/rBKBvryOQkhumH8=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0/go.mod h1:vK8o4ZTZSiG3rVyqxZcCNmT/cvEfx34ig7V65L9+6Rg=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0 h1:VRf4p0VhfuaR+Epy/nMIlu/9t39WU9CUgHVUvpuGxfU=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0/go.mod h1:5aDejksdXh5PdJN/OhpzATGT3kbNL0RMmw2Q0Q6E/o0=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0 h1:vRfn85jicO2F4eOTgsWtzmU/K3E/uZUtM1HEefvvJD8=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0/go.mod h1:sR+6eR+YEJhYZu9StbqzeWcCmHpfBAgX/qjP82HY9Gw=
+go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uFxcm2VO+vhad/eE=
+go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg=
+go.opentelemetry.io/collector/processor v0.113.0 h1:BQI6MsKtiCG9HT/nmiRXTKP6SZFrjFKVfM6pTQfbc0k=
+go.opentelemetry.io/collector/processor v0.113.0/go.mod h1:oX91zMI8ZkoaYSUfUYflHiMiiBJPKtODNBUCrETLLd8=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE=
+go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA=
+go.opentelemetry.io/collector/processor/processortest v0.113.0/go.mod h1:tNg04r5KlpVx774jSC8U/oYst1eb7WIiz+0AjXKJ0Uw=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
@@ -262,8 +262,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/pkg/util/grpc/auth.go b/pkg/util/grpc/auth.go
index dcf428e8ec5b8..382c4838919a6 100644
--- a/pkg/util/grpc/auth.go
+++ b/pkg/util/grpc/auth.go
@@ -7,9 +7,12 @@ package grpc
import (
"context"
+ "errors"
+ "fmt"
grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
"google.golang.org/grpc/status"
grpccontext "github.com/DataDog/datadog-agent/pkg/util/grpc/context"
@@ -37,3 +40,36 @@ func AuthInterceptor(verifier verifierFunc) grpc_auth.AuthFunc {
return context.WithValue(ctx, grpccontext.ContextKeyTokenInfoID, tokenInfo), nil
}
}
+
+// StaticAuthInterceptor is a gRPC interceptor that extracts an auth token from the request headers, and validates it
+// using the given token.
+func StaticAuthInterceptor(token string) grpc_auth.AuthFunc {
+ return AuthInterceptor(func(reqToken string) (interface{}, error) {
+ if reqToken != token {
+ return struct{}{}, errors.New("invalid session token")
+ }
+
+ return struct{}{}, nil
+ })
+}
+
+type bearerTokenAuth struct {
+ token string
+}
+
+func (b bearerTokenAuth) GetRequestMetadata(_ context.Context, _ ...string) (map[string]string, error) {
+ return map[string]string{
+ "authorization": fmt.Sprintf("Bearer %s", b.token),
+ }, nil
+}
+
+func (b bearerTokenAuth) RequireTransportSecurity() bool {
+ return true
+}
+
+// NewBearerTokenAuth creates a set of per-RPC credentials that uses a bearer token for authentication/authorization.
+//
+// This credentials implementation requires the connection to be secure (i.e. using TLS).
+func NewBearerTokenAuth(token string) credentials.PerRPCCredentials {
+ return bearerTokenAuth{token: token}
+}
diff --git a/pkg/util/kernel/download_headers.go b/pkg/util/kernel/download_headers.go
index 49cea0db74efb..554414312ac88 100644
--- a/pkg/util/kernel/download_headers.go
+++ b/pkg/util/kernel/download_headers.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build linux
+//go:build linux && linux_bpf
package kernel
diff --git a/pkg/util/kernel/download_headers_test.go b/pkg/util/kernel/download_headers_test.go
index 239306544cf1a..4439fac96ff3f 100644
--- a/pkg/util/kernel/download_headers_test.go
+++ b/pkg/util/kernel/download_headers_test.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build linux
+//go:build linux && linux_bpf
package kernel
diff --git a/pkg/util/kernel/find_headers.go b/pkg/util/kernel/find_headers.go
index 245e582bad16b..ad7feb779d5eb 100644
--- a/pkg/util/kernel/find_headers.go
+++ b/pkg/util/kernel/find_headers.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build linux
+//go:build linux && linux_bpf
package kernel
diff --git a/pkg/util/kernel/find_headers_test.go b/pkg/util/kernel/find_headers_test.go
index 9de3abd068445..79597b3eac84d 100644
--- a/pkg/util/kernel/find_headers_test.go
+++ b/pkg/util/kernel/find_headers_test.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build linux
+//go:build linux && linux_bpf
package kernel
diff --git a/pkg/util/kernel/proc_linux_test.go b/pkg/util/kernel/proc_linux_test.go
index 2aa5fb380b558..ee80d7e5f8a92 100644
--- a/pkg/util/kernel/proc_linux_test.go
+++ b/pkg/util/kernel/proc_linux_test.go
@@ -110,6 +110,12 @@ func TestGetEnvVariableFromBuffer(t *testing.T) {
envVar: "MY_VAR",
expected: "myvar",
},
+ {
+ name: "LastVarWithNoTrailingNull",
+ contents: "PATH=/usr/bin\x00MY_VAR=myvar",
+ envVar: "MY_VAR",
+ expected: "myvar",
+ },
}
for _, tc := range cases {
diff --git a/pkg/util/safeelf/elf.go b/pkg/util/safeelf/elf.go
new file mode 100644
index 0000000000000..0b94bbb956046
--- /dev/null
+++ b/pkg/util/safeelf/elf.go
@@ -0,0 +1,136 @@
+// This file is licensed under the MIT License.
+//
+// Copyright (c) 2017 Nathan Sweet
+// Copyright (c) 2018, 2019 Cloudflare
+// Copyright (c) 2019 Authors of Cilium
+//
+// MIT License
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+// Package safeelf provides safe (from panics) wrappers around ELF parsing
+package safeelf
+
+import (
+ "debug/dwarf"
+ "debug/elf" //nolint:depguard
+ "fmt"
+ "io"
+)
+
+// File is a safe wrapper around *elf.File that handles any panics in parsing
+type File struct {
+ *elf.File
+}
+
+// NewFile reads an ELF safely.
+//
+// Any panic during parsing is turned into an error. This is necessary since
+// there are a bunch of unfixed bugs in debug/elf.
+//
+// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle
+func NewFile(r io.ReaderAt) (safe *File, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ safe = nil
+ err = fmt.Errorf("reading ELF file panicked: %s", r)
+ }()
+
+ file, err := elf.NewFile(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return &File{file}, nil
+}
+
+// Open reads an ELF from a file.
+//
+// It works like NewFile, with the exception that safe.Close will
+// close the underlying file.
+func Open(path string) (safe *File, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ safe = nil
+ err = fmt.Errorf("reading ELF file panicked: %s", r)
+ }()
+
+ file, err := elf.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &File{file}, nil
+}
+
+// Symbols is the safe version of elf.File.Symbols.
+func (se *File) Symbols() (syms []Symbol, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ syms = nil
+ err = fmt.Errorf("reading ELF symbols panicked: %s", r)
+ }()
+
+ syms, err = se.File.Symbols()
+ return
+}
+
+// DynamicSymbols is the safe version of elf.File.DynamicSymbols.
+func (se *File) DynamicSymbols() (syms []Symbol, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ syms = nil
+ err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r)
+ }()
+
+ syms, err = se.File.DynamicSymbols()
+ return
+}
+
+// DWARF is the safe version of elf.File.DWARF.
+func (se *File) DWARF() (d *dwarf.Data, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ d = nil
+ err = fmt.Errorf("reading ELF DWARF panicked: %s", r)
+ }()
+
+ d, err = se.File.DWARF()
+ return
+}
diff --git a/pkg/util/safeelf/types.go b/pkg/util/safeelf/types.go
new file mode 100644
index 0000000000000..dab4baca56bb5
--- /dev/null
+++ b/pkg/util/safeelf/types.go
@@ -0,0 +1,44 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+//nolint:revive
+package safeelf
+
+import "debug/elf" //nolint:depguard
+
+type Prog = elf.Prog
+type Symbol = elf.Symbol
+type Section = elf.Section
+type SectionType = elf.SectionType
+type SectionIndex = elf.SectionIndex
+
+var ErrNoSymbols = elf.ErrNoSymbols
+
+const SHF_ALLOC = elf.SHF_ALLOC
+const SHF_EXECINSTR = elf.SHF_EXECINSTR
+
+const SHT_SYMTAB = elf.SHT_SYMTAB
+const SHT_DYNSYM = elf.SHT_DYNSYM
+
+const ET_EXEC = elf.ET_EXEC
+const ET_DYN = elf.ET_DYN
+
+const PT_LOAD = elf.PT_LOAD
+const PT_TLS = elf.PT_TLS
+
+const EM_X86_64 = elf.EM_X86_64
+const EM_AARCH64 = elf.EM_AARCH64
+
+const Sym32Size = elf.Sym32Size
+const Sym64Size = elf.Sym64Size
+
+const ELFCLASS32 = elf.ELFCLASS32
+const ELFCLASS64 = elf.ELFCLASS64
+
+const PF_X = elf.PF_X
+const PF_W = elf.PF_W
+
+const STB_GLOBAL = elf.STB_GLOBAL
+const STT_FUNC = elf.STT_FUNC
diff --git a/pkg/util/utilizationtracker/doc.go b/pkg/util/utilizationtracker/doc.go
new file mode 100644
index 0000000000000..f039c62deb991
--- /dev/null
+++ b/pkg/util/utilizationtracker/doc.go
@@ -0,0 +1,7 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package utilizationtracker provides a utility to track the utilization of a component.
+package utilizationtracker
diff --git a/pkg/util/utilizationtracker/go.mod b/pkg/util/utilizationtracker/go.mod
new file mode 100644
index 0000000000000..fd269ca0657a7
--- /dev/null
+++ b/pkg/util/utilizationtracker/go.mod
@@ -0,0 +1,14 @@
+module github.com/DataDog/datadog-agent/pkg/util/utilizationtracker
+
+go 1.22.0
+
+require (
+ github.com/benbjohnson/clock v1.3.5
+ github.com/stretchr/testify v1.9.0
+)
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/pkg/util/utilizationtracker/go.sum b/pkg/util/utilizationtracker/go.sum
new file mode 100644
index 0000000000000..29fbd520c2821
--- /dev/null
+++ b/pkg/util/utilizationtracker/go.sum
@@ -0,0 +1,12 @@
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/pkg/collector/worker/utilization_tracker.go b/pkg/util/utilizationtracker/utilization_tracker.go
similarity index 61%
rename from pkg/collector/worker/utilization_tracker.go
rename to pkg/util/utilizationtracker/utilization_tracker.go
index 24127081983d4..b57d14b4b4778 100644
--- a/pkg/collector/worker/utilization_tracker.go
+++ b/pkg/util/utilizationtracker/utilization_tracker.go
@@ -3,7 +3,8 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-package worker
+// Package utilizationtracker provides a utility to track the utilization of a component.
+package utilizationtracker
import (
"time"
@@ -14,12 +15,12 @@ import (
type trackerEvent int
const (
- checkStarted trackerEvent = iota
- checkStopped
+ started trackerEvent = iota
+ stopped
trackerTick
)
-//nolint:revive // TODO(AML) Fix revive linter
+// UtilizationTracker tracks the utilization of a component.
type UtilizationTracker struct {
Output chan float64
@@ -32,9 +33,9 @@ type UtilizationTracker struct {
// alpha is the ewma smoothing factor.
alpha float64
- checkStarted time.Time
- nextTick time.Time
- interval time.Duration
+ started time.Time
+ nextTick time.Time
+ interval time.Duration
clock clock.Clock
}
@@ -42,23 +43,20 @@ type UtilizationTracker struct {
// NewUtilizationTracker instantiates and configures a utilization tracker that
// calculates the values and publishes them to expvars
func NewUtilizationTracker(
- workerName string,
interval time.Duration,
+ alpha float64,
) *UtilizationTracker {
return newUtilizationTrackerWithClock(
- workerName,
interval,
clock.New(),
+ alpha,
)
}
// newUtilizationTrackerWithClock is primarely used for testing.
-//
// Does not start the background goroutines, so that the tests can call update() to get
// deterministic results.
-//
-//nolint:revive // TODO(AML) Fix revive linter
-func newUtilizationTrackerWithClock(_ string, interval time.Duration, clk clock.Clock) *UtilizationTracker {
+func newUtilizationTrackerWithClock(interval time.Duration, clk clock.Clock, alpha float64) *UtilizationTracker {
ut := &UtilizationTracker{
clock: clk,
@@ -66,9 +64,8 @@ func newUtilizationTrackerWithClock(_ string, interval time.Duration, clk clock.
nextTick: clk.Now(),
interval: interval,
- alpha: 0.25, // converges to 99.98% of constant input in 30 iterations.
-
- Output: make(chan float64, 1),
+ alpha: alpha,
+ Output: make(chan float64, 1),
}
go ut.run()
@@ -86,12 +83,12 @@ func (ut *UtilizationTracker) run() {
// invariant: ut.nextTick > now
switch ev {
- case checkStarted:
- // invariant: ut.nextTick > ut.checkStarted
- ut.checkStarted = now
- case checkStopped:
- ut.busy += now.Sub(ut.checkStarted)
- ut.checkStarted = time.Time{}
+ case started:
+ // invariant: ut.nextTick > ut.started
+ ut.started = now
+ case stopped:
+ ut.busy += now.Sub(ut.started)
+ ut.started = time.Time{}
case trackerTick:
// nothing, just tick
}
@@ -100,10 +97,10 @@ func (ut *UtilizationTracker) run() {
func (ut *UtilizationTracker) update(now time.Time) {
for ut.nextTick.Before(now) {
- if !ut.checkStarted.IsZero() {
- // invariant: ut.nextTick > ut.checkStarted
- ut.busy += ut.nextTick.Sub(ut.checkStarted)
- ut.checkStarted = ut.nextTick
+ if !ut.started.IsZero() {
+ // invariant: ut.nextTick > ut.started
+ ut.busy += ut.nextTick.Sub(ut.started)
+ ut.started = ut.nextTick
}
update := float64(ut.busy) / float64(ut.interval)
@@ -116,32 +113,32 @@ func (ut *UtilizationTracker) update(now time.Time) {
ut.Output <- ut.value
}
-// Stop should be invoked when a worker is about to exit
-// so that we can remove the instance's expvars
+// Stop should be invoked when a component is about to exit
+// so that we can clean up the instance's resources.
func (ut *UtilizationTracker) Stop() {
// The user will not send anything anymore
close(ut.eventsChan)
}
-// Tick updates to the utilization during intervals where no check were started or stopped.
+// Tick updates the utilization during intervals where no work was started or stopped.
//
// Produces one value on the Output channel.
func (ut *UtilizationTracker) Tick() {
ut.eventsChan <- trackerTick
}
-// CheckStarted should be invoked when a worker's check is about to run so that we can track the
+// Started should be invoked when a component's work is about to begin so that we can track the
// start time and the utilization.
//
// Produces one value on the Output channel.
-func (ut *UtilizationTracker) CheckStarted() {
- ut.eventsChan <- checkStarted
+func (ut *UtilizationTracker) Started() {
+ ut.eventsChan <- started
}
-// CheckFinished should be invoked when a worker's check is complete so that we can calculate the
-// utilization of the linked worker.
+// Finished should be invoked when a component's work is complete so that we can calculate the
+// utilization of the component.
//
// Produces one value on the Output channel.
-func (ut *UtilizationTracker) CheckFinished() {
- ut.eventsChan <- checkStopped
+func (ut *UtilizationTracker) Finished() {
+ ut.eventsChan <- stopped
}
diff --git a/pkg/collector/worker/utilization_tracker_test.go b/pkg/util/utilizationtracker/utilization_tracker_test.go
similarity index 94%
rename from pkg/collector/worker/utilization_tracker_test.go
rename to pkg/util/utilizationtracker/utilization_tracker_test.go
index 9fef376d2c6b4..52af4667d6fdb 100644
--- a/pkg/collector/worker/utilization_tracker_test.go
+++ b/pkg/util/utilizationtracker/utilization_tracker_test.go
@@ -3,7 +3,8 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-package worker
+// Package utilizationtracker provides a utility to track the utilization of a component.
+package utilizationtracker
import (
"math/rand"
@@ -21,9 +22,9 @@ import (
func newTracker(_ *testing.T) (*UtilizationTracker, *clock.Mock) {
clk := clock.NewMock()
ut := newUtilizationTrackerWithClock(
- "worker",
100*time.Millisecond,
clk,
+ 0.25,
)
return ut, clk
@@ -49,7 +50,7 @@ func TestUtilizationTracker(t *testing.T) {
clk.Add(300 * time.Millisecond)
// Ramp up the expected utilization
- ut.CheckStarted()
+ ut.Started()
//nolint:revive // TODO(AML) Fix revive linter
old, new = new, <-ut.Output
require.Equal(t, old, new)
@@ -67,7 +68,7 @@ func TestUtilizationTracker(t *testing.T) {
require.Greater(t, new, old)
// Ramp down the expected utilization
- ut.CheckFinished()
+ ut.Finished()
//nolint:revive // TODO(AML) Fix revive linter
old, new = new, <-ut.Output
require.Equal(t, old, new) //no time have passed
@@ -99,7 +100,7 @@ func TestUtilizationTrackerCheckLifecycle(t *testing.T) {
for idx := 0; idx < 3; idx++ {
// Ramp up utilization
- ut.CheckStarted()
+ ut.Started()
//nolint:revive // TODO(AML) Fix revive linter
old, new = new, <-ut.Output
assert.Equal(t, old, new)
@@ -117,7 +118,7 @@ func TestUtilizationTrackerCheckLifecycle(t *testing.T) {
assert.Greater(t, new, old)
// Ramp down utilization
- ut.CheckFinished()
+ ut.Finished()
//nolint:revive // TODO(AML) Fix revive linter
old, new = new, <-ut.Output
assert.Equal(t, new, old)
@@ -151,13 +152,13 @@ func TestUtilizationTrackerAccuracy(t *testing.T) {
totalMs := r.Int31n(100) + 100
runtimeMs := (totalMs * 30) / 100
- ut.CheckStarted()
+ ut.Started()
<-ut.Output
runtimeDuration := time.Duration(runtimeMs) * time.Millisecond
clk.Add(runtimeDuration)
- ut.CheckFinished()
+ ut.Finished()
val = <-ut.Output
idleDuration := time.Duration(totalMs-runtimeMs) * time.Millisecond
diff --git a/release.json b/release.json
index 58fdf6d8bf7bd..51be2bf1b6144 100644
--- a/release.json
+++ b/release.json
@@ -7,7 +7,7 @@
},
"nightly": {
"INTEGRATIONS_CORE_VERSION": "master",
- "OMNIBUS_SOFTWARE_VERSION": "cf7fc9c3ab792ebf97dd7c2ce0be39a6e197f6f5",
+ "OMNIBUS_SOFTWARE_VERSION": "58b335c2c49efc266e9e707d9a2a36198ff8f1a3",
"OMNIBUS_RUBY_VERSION": "d365e483ee05a13e55eeb5208d11452c5b65afbb",
"JMXFETCH_VERSION": "0.49.6",
"JMXFETCH_HASH": "f06bdac1f8ec41daf9b9839ac883f1865a068b04810ea82197b8a6afb9369cb9",
@@ -26,7 +26,7 @@
},
"nightly-a7": {
"INTEGRATIONS_CORE_VERSION": "master",
- "OMNIBUS_SOFTWARE_VERSION": "cf7fc9c3ab792ebf97dd7c2ce0be39a6e197f6f5",
+ "OMNIBUS_SOFTWARE_VERSION": "58b335c2c49efc266e9e707d9a2a36198ff8f1a3",
"OMNIBUS_RUBY_VERSION": "d365e483ee05a13e55eeb5208d11452c5b65afbb",
"JMXFETCH_VERSION": "0.49.6",
"JMXFETCH_HASH": "f06bdac1f8ec41daf9b9839ac883f1865a068b04810ea82197b8a6afb9369cb9",
diff --git a/releasenotes/notes/aurora-cluster-pagination-47707cb5c01464a5.yaml b/releasenotes/notes/aurora-cluster-pagination-47707cb5c01464a5.yaml
new file mode 100644
index 0000000000000..51373289ccf77
--- /dev/null
+++ b/releasenotes/notes/aurora-cluster-pagination-47707cb5c01464a5.yaml
@@ -0,0 +1,11 @@
+# Each section from every release note are combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+enhancements:
+ - |
+ Added support for more than 100 Aurora clusters in a user's account when using database autodiscovery.
diff --git a/releasenotes/notes/milvus-origin-1f569a4c954c8b61.yaml b/releasenotes/notes/milvus-origin-1f569a4c954c8b61.yaml
new file mode 100644
index 0000000000000..10565830c362c
--- /dev/null
+++ b/releasenotes/notes/milvus-origin-1f569a4c954c8b61.yaml
@@ -0,0 +1,11 @@
+# Each section from every release note are combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+other:
+ - |
+ Added origin for new Milvus integration.
diff --git a/releasenotes/notes/remove-rpath-patching-linux-b7afc259e338e1b5.yaml b/releasenotes/notes/remove-rpath-patching-linux-b7afc259e338e1b5.yaml
new file mode 100644
index 0000000000000..9f6853a7aaecf
--- /dev/null
+++ b/releasenotes/notes/remove-rpath-patching-linux-b7afc259e338e1b5.yaml
@@ -0,0 +1,13 @@
+# Each section from every release note are combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+
+fixes:
+ - |
+ Fix a regression that caused the Agent to not be able to run if its
+ capabilities had been modified with the `setcap` command.
diff --git a/releasenotes/notes/use-remote-tagger-for-trace-and-security-agents-9650d1be5d49e47f.yaml b/releasenotes/notes/use-remote-tagger-for-trace-and-security-agents-9650d1be5d49e47f.yaml
new file mode 100644
index 0000000000000..58df7e89e677a
--- /dev/null
+++ b/releasenotes/notes/use-remote-tagger-for-trace-and-security-agents-9650d1be5d49e47f.yaml
@@ -0,0 +1,12 @@
+# Each section from every release note are combined when the
+# CHANGELOG.rst is rendered. So the text needs to be worded so that
+# it does not depend on any information only available in another
+# section. This may mean repeating some details, but each section
+# must be readable independently of the other.
+#
+# Each section note must be formatted as reStructuredText.
+---
+deprecations:
+ - |
+ The remote tagger for the trace-agent and security-agent is now always enabled and cannot be disabled.
+ ``apm_config.remote_tagger``, ``security_agent.remote_tagger``, and ``event_monitoring_config.remote_tagger`` config entries are removed.
diff --git a/tasks/agent.py b/tasks/agent.py
index d4bebb0b19835..ea0a77da5a501 100644
--- a/tasks/agent.py
+++ b/tasks/agent.py
@@ -37,12 +37,6 @@
BIN_PATH = os.path.join(BIN_DIR, "agent")
AGENT_TAG = "datadog/agent:master"
-BUNDLED_AGENTS = {
- # system-probe requires a working compilation environment for eBPF so we do not
- # enable it by default but we enable it in the released artifacts.
- AgentFlavor.base: ["process-agent", "trace-agent", "security-agent"],
-}
-
if sys.platform == "win32":
# Our `ridk enable` toolchain puts Ruby's bin dir at the front of the PATH
# This dir contains `aws.rb` which will execute if we just call `aws`,
@@ -192,7 +186,7 @@ def build(
out="cmd/agent/rsrc.syso",
)
else:
- bundled_agents += bundle or BUNDLED_AGENTS.get(flavor, [])
+ bundled_agents += bundle or []
if flavor.is_iot():
# Iot mode overrides whatever passed through `--build-exclude` and `--build-include`
@@ -945,3 +939,12 @@ def generate_config(ctx, build_type, output_file, env=None):
}
cmd = "go run {go_file} {build_type} {template_file} {output_file}"
return ctx.run(cmd.format(**args), env=env or {})
+
+
+@task()
+def build_remote_agent(ctx, env=None):
+ """
+ Builds the remote-agent example client.
+ """
+ cmd = "go build -v -o bin/remote-agent ./internal/remote-agent"
+ return ctx.run(cmd, env=env or {})
diff --git a/tasks/collector.py b/tasks/collector.py
index 5a94e2d34659d..28c2b2e9540bd 100644
--- a/tasks/collector.py
+++ b/tasks/collector.py
@@ -1,15 +1,18 @@
import os
import platform
+import re
import shutil
import subprocess
import tempfile
import urllib.request
+import requests
import yaml
from invoke.exceptions import Exit
from invoke.tasks import task
from tasks.go import tidy
+from tasks.libs.ciproviders.github_api import GithubAPI
from tasks.libs.common.color import Color, color_message
LICENSE_HEADER = """// Unless explicitly stated otherwise all files in this repository are licensed
@@ -17,7 +20,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
"""
-OCB_VERSION = "0.111.0"
+OCB_VERSION = "0.113.0"
MANDATORY_COMPONENTS = {
"extensions": [
@@ -56,6 +59,8 @@
},
}
+MANIFEST_FILE = "./comp/otelcol/collector-contrib/impl/manifest.yaml"
+
class YAMLValidationError(Exception):
def __init__(self, message):
@@ -82,13 +87,26 @@ def find_matching_components(manifest, components_to_match: dict, present: bool)
return res
+def versions_equal(version1, version2):
+ # strip leading 'v' if present
+ if version1.startswith("v"):
+ version1 = version1[1:]
+ if version2.startswith("v"):
+ version2 = version2[1:]
+ # Split the version strings by '.'
+ parts1 = version1.split(".")
+ parts2 = version2.split(".")
+ # Compare the first two parts (major and minor versions)
+ return parts1[0] == parts2[0] and parts1[1] == parts2[1]
+
+
def validate_manifest(manifest) -> list:
"""Return a list of components to remove, or empty list if valid.
If invalid components are found, raise a YAMLValidationError."""
# validate collector version matches ocb version
manifest_version = manifest.get("dist", {}).get("otelcol_version")
- if manifest_version and manifest_version != OCB_VERSION:
+ if manifest_version and not versions_equal(manifest_version, OCB_VERSION):
raise YAMLValidationError(
f"Collector version ({manifest_version}) in manifest does not match required OCB version ({OCB_VERSION})"
)
@@ -153,7 +171,6 @@ def generate(ctx):
binary_url = f"{BASE_URL}{binary_name}"
- config_path = "./comp/otelcol/collector-contrib/impl/manifest.yaml"
with tempfile.TemporaryDirectory() as tmpdirname:
binary_path = os.path.join(tmpdirname, binary_name)
print(f"Downloading {binary_url} to {binary_path}...")
@@ -169,7 +186,7 @@ def generate(ctx):
) from e
# Run the binary with specified options
- run_command = f"{binary_path} --config {config_path} --skip-compilation"
+ run_command = f"{binary_path} --config {MANIFEST_FILE} --skip-compilation"
print(f"Running command: {run_command}")
try:
@@ -189,7 +206,7 @@ def generate(ctx):
output_path = None
components_to_remove = []
try:
- with open(config_path) as file:
+ with open(MANIFEST_FILE) as file:
manifest = yaml.safe_load(file)
output_path = manifest["dist"]["output_path"]
components_to_remove = validate_manifest(manifest)
@@ -200,7 +217,7 @@ def generate(ctx):
) from e
if components_to_remove:
- strip_invalid_components(config_path, components_to_remove)
+ strip_invalid_components(MANIFEST_FILE, components_to_remove)
if output_path != impl_path:
files_to_copy = ["components.go", "go.mod"]
@@ -242,3 +259,206 @@ def generate(ctx):
f.write(content)
print(f"Updated package name and ensured license header in: {file_path}")
+
+
+def fetch_latest_release(repo):
+ gh = GithubAPI(repo)
+ return gh.latest_release()
+
+
+def fetch_core_module_versions(version):
+ """
+ Fetch versions.yaml from the provided URL and build a map of modules with their versions.
+ """
+ url = f"https://raw.githubusercontent.com/open-telemetry/opentelemetry-collector/{version}/versions.yaml"
+ print(f"Fetching versions from {url}")
+
+ try:
+ response = requests.get(url)
+ response.raise_for_status() # Raises an HTTPError if the HTTP request returned an unsuccessful status code
+ except requests.exceptions.RequestException as e:
+ raise Exit(
+ color_message(f"Failed to fetch the YAML file: {e}", Color.RED),
+ code=1,
+ ) from e
+
+ yaml_content = response.content
+
+ try:
+ data = yaml.safe_load(yaml_content)
+ except yaml.YAMLError as e:
+ raise Exit(
+ color_message(f"Failed to parse YAML content: {e}", Color.RED),
+ code=1,
+ ) from e
+
+ version_modules = {}
+ for _, details in data.get("module-sets", {}).items():
+ version = details.get("version", "unknown")
+ for module in details.get("modules", []):
+ version_modules[version] = version_modules.get(version, []) + [module]
+ return version_modules
+
+
+def update_go_mod_file(go_mod_path, collector_version_modules):
+ print(f"Updating {go_mod_path}")
+ # Read all lines from the go.mod file
+ with open(go_mod_path) as file:
+ lines = file.readlines()
+
+ updated_lines = []
+ file_updated = False # To check if the file was modified
+
+ # Compile a regex for each module to match the module name exactly
+ compiled_modules = {
+ module: re.compile(rf"^\s*{re.escape(module)}\s+v[\d\.]+")
+ for _, modules in collector_version_modules.items()
+ for module in modules
+ }
+ for line in lines:
+ updated_line = line
+ for version, modules in collector_version_modules.items():
+ for module in modules:
+ module_regex = compiled_modules[module]
+ match = module_regex.match(line)
+ if match:
+ print(f"Updating {module} to version {version} in {go_mod_path}")
+ updated_line = f"{match.group(0).split()[0]} {version}\n"
+ file_updated = True
+ break # Stop checking other modules once we find a match
+ if updated_line != line:
+ break # If the line was updated, stop checking other versions
+ updated_lines.append(updated_line)
+
+ # Write the updated lines back to the file only if changes were made
+ if file_updated:
+ with open(go_mod_path, "w") as file:
+ file.writelines(updated_lines)
+ print(f"{go_mod_path} updated.")
+ else:
+ print(f"No changes made to {go_mod_path}.")
+
+
+def update_all_go_mod(collector_version_modules):
+ for root, _, files in os.walk("."):
+ if "go.mod" in files:
+ go_mod_path = os.path.join(root, "go.mod")
+ update_go_mod_file(go_mod_path, collector_version_modules)
+ print("All go.mod files updated.")
+
+
+def read_old_version(filepath):
+ """Reads the old version from the manifest.yaml file."""
+ version_regex = re.compile(r"^\s*version:\s+([\d\.]+)")
+ with open(filepath) as file:
+ for line in file:
+ match = version_regex.match(line)
+ if match:
+ return match.group(1)
+ return None
+
+
+def update_file(filepath, old_version, new_version):
+ """Updates all instances of the old version to the new version in the file."""
+ print(f"Updating all instances of {old_version} to {new_version} in {filepath}")
+ with open(filepath) as file:
+ content = file.read()
+
+ # Replace all occurrences of the old version with the new version
+ updated_content = content.replace(old_version, new_version)
+
+ # Write the updated content back to the file
+ with open(filepath, "w") as file:
+ file.write(updated_content)
+
+ print(f"Updated all instances of {old_version} to {new_version} in {filepath}")
+
+
+def update_core_collector():
+ print("Updating the core collector version in all go.mod files and manifest.yaml file.")
+ repo = "open-telemetry/opentelemetry-collector"
+ collector_version = fetch_latest_release(repo)
+ if collector_version:
+ print(f"Latest release for {repo}: {collector_version}")
+ version_modules = fetch_core_module_versions(collector_version)
+ update_all_go_mod(version_modules)
+ old_version = read_old_version(MANIFEST_FILE)
+ if old_version:
+ collector_version = collector_version[1:]
+ update_file(MANIFEST_FILE, old_version, collector_version)
+ update_file(
+ "./comp/otelcol/collector/impl/collector.go",
+ old_version,
+ collector_version,
+ )
+ # update other files
+ update_file("./tasks/collector.py", old_version, collector_version)
+ for root, _, files in os.walk("./tasks/unit_tests/testdata/collector"):
+ for file in files:
+ update_file(os.path.join(root, file), old_version, collector_version)
+ update_file(
+ "./test/otel/testdata/builder-config.yaml",
+ old_version,
+ collector_version,
+ )
+ update_file("./.gitlab/integration_test/otel.yml", old_version, collector_version)
+
+ else:
+ print(f"Failed to fetch the latest release for {repo}")
+
+ print("Core collector update complete.")
+
+
+def update_versions_in_yaml(yaml_file_path, new_version, component_prefix):
+ with open(yaml_file_path) as file:
+ data = yaml.safe_load(file)
+
+ # Function to update versions in a list of components
+ def update_component_versions(components):
+ for i, component in enumerate(components):
+ if "gomod" in component and component_prefix in component["gomod"]:
+ parts = component["gomod"].split(" ")
+ if len(parts) == 2:
+ parts[1] = new_version
+ components[i]["gomod"] = " ".join(parts)
+
+ # Update extensions, receivers, processors, and exporters
+ for key in ["extensions", "receivers", "processors", "exporters", "connectors"]:
+ if key in data:
+ update_component_versions(data[key])
+
+ with open(yaml_file_path, "w") as file:
+ yaml.dump(data, file, default_flow_style=False)
+
+ print(
+ f"Updated YAML file at {yaml_file_path} with new version {new_version} for components matching '{component_prefix}'."
+ )
+
+
+def update_collector_contrib():
+ print("Updating the collector-contrib version in all go.mod files.")
+ repo = "open-telemetry/opentelemetry-collector-contrib"
+ modules = ["github.com/open-telemetry/opentelemetry-collector-contrib"]
+ collector_version = fetch_latest_release(repo)
+ if collector_version:
+ print(f"Latest release for {repo}: {collector_version}")
+ version_modules = {
+ collector_version: modules,
+ }
+ update_all_go_mod(version_modules)
+ update_versions_in_yaml(
+ MANIFEST_FILE,
+ collector_version,
+ "github.com/open-telemetry/opentelemetry-collector-contrib",
+ )
+
+ else:
+ print(f"Failed to fetch the latest release for {repo}")
+ print("Collector-contrib update complete.")
+
+
+@task(post=[tidy])
+def update(ctx):
+ update_core_collector()
+ update_collector_contrib()
+ print("Update complete.")
diff --git a/tasks/go.py b/tasks/go.py
index eee8afc40f7c1..f2c25c7f82bdb 100644
--- a/tasks/go.py
+++ b/tasks/go.py
@@ -19,15 +19,15 @@
from invoke import task
from invoke.exceptions import Exit
-import tasks.modules
from tasks.build_tags import ALL_TAGS, UNIT_TEST_TAGS, get_default_build_tags
from tasks.libs.common.color import color_message
from tasks.libs.common.git import check_uncommitted_changes
from tasks.libs.common.go import download_go_dependencies
+from tasks.libs.common.gomodules import Configuration, GoModule, get_default_modules
from tasks.libs.common.user_interactions import yes_no_question
from tasks.libs.common.utils import TimedOperationResult, get_build_flags, timed
from tasks.licenses import get_licenses_list
-from tasks.modules import DEFAULT_MODULES, generate_dummy_package
+from tasks.modules import generate_dummy_package
GOOS_MAPPING = {
"win32": "windows",
@@ -102,7 +102,7 @@ def internal_deps_checker(ctx, formatFile=False):
"""
repo_path = os.getcwd()
extra_params = "--formatFile true" if formatFile else ""
- for mod in DEFAULT_MODULES.values():
+ for mod in get_default_modules().values():
ctx.run(
f"go run ./internal/tools/modformatter/modformatter.go --path={mod.full_path()} --repoPath={repo_path} {extra_params}"
)
@@ -113,7 +113,7 @@ def deps(ctx, verbose=False):
"""
Setup Go dependencies
"""
- paths = [mod.full_path() for mod in DEFAULT_MODULES.values()]
+ paths = [mod.full_path() for mod in get_default_modules().values()]
download_go_dependencies(ctx, paths, verbose=verbose)
@@ -214,6 +214,7 @@ def generate_protobuf(ctx):
'process': (False, False),
'workloadmeta': (False, False),
'languagedetection': (False, False),
+ 'remoteagent': (False, False),
}
# maybe put this in a separate function
@@ -360,7 +361,7 @@ def reset(ctx):
@task
def check_go_mod_replaces(_):
errors_found = set()
- for mod in DEFAULT_MODULES.values():
+ for mod in get_default_modules().values():
go_sum = os.path.join(mod.full_path(), "go.sum")
if not os.path.exists(go_sum):
continue
@@ -381,11 +382,30 @@ def check_go_mod_replaces(_):
raise Exit(message=message)
+def raise_if_errors(errors_found, suggestion_msg=None):
+ if errors_found:
+ message = "\nErrors found:\n" + "\n".join(" - " + error for error in errors_found)
+ if suggestion_msg:
+ message += f"\n\n{suggestion_msg}"
+ raise Exit(message=message)
+
+
+def check_valid_mods(ctx):
+ errors_found = []
+ for mod in get_default_modules().values():
+ pattern = os.path.join(mod.full_path(), '*.go')
+ if not glob.glob(pattern):
+ errors_found.append(f"module {mod.import_path} does not contain *.go source files, so it is not a package")
+ raise_if_errors(errors_found)
+ return bool(errors_found)
+
+
@task
def check_mod_tidy(ctx, test_folder="testmodule"):
+ check_valid_mods(ctx)
with generate_dummy_package(ctx, test_folder) as dummy_folder:
errors_found = []
- for mod in DEFAULT_MODULES.values():
+ for mod in get_default_modules().values():
with ctx.cd(mod.full_path()):
ctx.run("go mod tidy")
@@ -398,7 +418,7 @@ def check_mod_tidy(ctx, test_folder="testmodule"):
if res.exited is None or res.exited > 0:
errors_found.append(f"go.mod or go.sum for {mod.import_path} module is out of sync")
- for mod in DEFAULT_MODULES.values():
+ for mod in get_default_modules().values():
# Ensure that none of these modules import the datadog-agent main module.
if mod.independent:
ctx.run(f"go run ./internal/tools/independent-lint/independent.go --path={mod.full_path()}")
@@ -411,10 +431,7 @@ def check_mod_tidy(ctx, test_folder="testmodule"):
if os.path.isfile(os.path.join(ctx.cwd, "main")):
os.remove(os.path.join(ctx.cwd, "main"))
- if errors_found:
- message = "\nErrors found:\n" + "\n".join(" - " + error for error in errors_found)
- message += "\n\nRun 'inv tidy' to fix 'out of sync' errors."
- raise Exit(message=message)
+ raise_if_errors(errors_found, "Run 'inv tidy' to fix 'out of sync' errors.")
@task
@@ -426,6 +443,8 @@ def tidy_all(ctx):
@task
def tidy(ctx):
+ check_valid_mods(ctx)
+
if os.name != 'nt': # not windows
import resource
@@ -438,7 +457,7 @@ def tidy(ctx):
# Note: It's currently faster to tidy everything than looking for exactly what we should tidy
promises = []
- for mod in DEFAULT_MODULES.values():
+ for mod in get_default_modules().values():
with ctx.cd(mod.full_path()):
# https://docs.pyinvoke.org/en/stable/api/runners.html#invoke.runners.Runner.run
promises.append(ctx.run("go mod tidy", asynchronous=True))
@@ -469,7 +488,7 @@ def go_fix(ctx, fix=None):
fixarg = f" -fix {fix}"
oslist = ["linux", "windows", "darwin"]
- for mod in DEFAULT_MODULES.values():
+ for mod in get_default_modules().values():
with ctx.cd(mod.full_path()):
for osname in oslist:
tags = set(ALL_TAGS).union({osname, "ebpf_bindata"})
@@ -506,44 +525,6 @@ def add_replaces(ctx, path, replaces: Iterable[str]):
ctx.run(f"go mod edit -replace={online_path}={module_local_path}")
-def add_go_module(path):
- """
- Add go module to modules.py
- """
- print(color_message("Updating DEFAULT_MODULES within modules.py", "blue"))
- modules_path = tasks.modules.__file__
- with open(modules_path) as f:
- modulespy = f.read()
-
- modulespy_regex = re.compile(r"DEFAULT_MODULES = {\n(.+?)\n}", re.DOTALL | re.MULTILINE)
-
- all_modules_match = modulespy_regex.search(modulespy)
- assert all_modules_match, "Could not find DEFAULT_MODULES in modules.py"
- all_modules = all_modules_match.group(1)
- all_modules = all_modules.split('\n')
- indent = ' ' * 4
-
- new_module = f'{indent}"{path}": GoModule("{path}", independent=True),'
-
- # Insert in order
- insert_line = 0
- for i, line in enumerate(all_modules):
- # This line is the start of a module (not a comment / middle of a module declaration)
- if line.startswith(f'{indent}"'):
- results = re.search(rf'{indent}"([^"]*)"', line)
- assert results, f"Could not find module name in line '{line}'"
- module = results.group(1)
- if module < path:
- insert_line = i
- else:
- assert module != path, f"Module {path} already exists within {modules_path}"
-
- all_modules.insert(insert_line, new_module)
- all_modules = '\n'.join(all_modules)
- with open(modules_path, 'w') as f:
- f.write(modulespy.replace(all_modules_match.group(1), all_modules))
-
-
@task
def create_module(ctx, path: str, no_verify: bool = False):
"""
@@ -575,6 +556,9 @@ def create_module(ctx, path: str, no_verify: bool = False):
""".replace(' ', '')
try:
+ modules = Configuration.from_file()
+ assert path not in modules.modules, f'Module {path} already exists'
+
# Create package
print(color_message(f"Creating package {path}", "blue"))
@@ -618,8 +602,12 @@ def create_module(ctx, path: str, no_verify: bool = False):
for mod in dependent_modules:
add_replaces(ctx, mod, [path])
- # Update modules.py
- add_go_module(path)
+ # Add this module as independent in the module configuration
+ modules.modules[path] = GoModule(path, independent=True)
+ modules.to_file()
+ print(
+ f'{color_message("NOTE", "blue")}: The modules.yml file has been updated to mark the module as independent, you can modify this file to change the module configuration.'
+ )
if not is_empty:
# Tidy all
@@ -670,7 +658,7 @@ def mod_diffs(_, targets):
"""
# Find all go.mod files in the repo
all_go_mod_files = []
- for module in DEFAULT_MODULES:
+ for module in get_default_modules():
all_go_mod_files.append(os.path.join(module, 'go.mod'))
# Validate the provided targets
diff --git a/tasks/gotest.py b/tasks/gotest.py
index 1b551d6c6d31c..2c2a444476775 100644
--- a/tasks/gotest.py
+++ b/tasks/gotest.py
@@ -31,6 +31,7 @@
from tasks.libs.common.color import color_message
from tasks.libs.common.datadog_api import create_count, send_metrics
from tasks.libs.common.git import get_modified_files
+from tasks.libs.common.gomodules import get_default_modules
from tasks.libs.common.junit_upload_core import enrich_junitxml, produce_junit_tar
from tasks.libs.common.utils import (
clean_nested_paths,
@@ -39,7 +40,7 @@
running_in_ci,
)
from tasks.libs.releasing.json import _get_release_json_value
-from tasks.modules import DEFAULT_MODULES, GoModule, get_module_by_path
+from tasks.modules import GoModule, get_module_by_path
from tasks.test_core import ModuleTestResult, process_input_args, process_module_results, test_core
from tasks.testwasher import TestWasher
from tasks.trace_agent import integration_tests as trace_integration_tests
@@ -135,7 +136,7 @@ def test_flavor(
def command(test_results, module, module_result):
module_path = module.full_path()
with ctx.cd(module_path):
- packages = ' '.join(f"{t}/..." if not t.endswith("/...") else t for t in module.targets)
+ packages = ' '.join(f"{t}/..." if not t.endswith("/...") else t for t in module.test_targets)
with CodecovWorkaround(ctx, module_path, coverage, packages, args) as cov_test_path:
res = ctx.run(
command=cmd.format(
@@ -467,7 +468,7 @@ def get_modified_packages(ctx, build_tags=None, lint=False) -> list[GoModule]:
assert best_module_path, f"No module found for {modified_file}"
module = get_module_by_path(best_module_path)
- targets = module.lint_targets if lint else module.targets
+ targets = module.lint_targets if lint else module.test_targets
for target in targets:
if os.path.normpath(os.path.join(best_module_path, target)) in modified_file:
@@ -501,24 +502,24 @@ def get_modified_packages(ctx, build_tags=None, lint=False) -> list[GoModule]:
if best_module_path in modules_to_test:
if (
- modules_to_test[best_module_path].targets is not None
- and os.path.dirname(modified_file) not in modules_to_test[best_module_path].targets
+ modules_to_test[best_module_path].test_targets is not None
+ and os.path.dirname(modified_file) not in modules_to_test[best_module_path].test_targets
):
- modules_to_test[best_module_path].targets.append(relative_target)
+ modules_to_test[best_module_path].test_targets.append(relative_target)
else:
- modules_to_test[best_module_path] = GoModule(best_module_path, targets=[relative_target])
+ modules_to_test[best_module_path] = GoModule(best_module_path, test_targets=[relative_target])
# Clean up duplicated paths to reduce Go test cmd length
for module in modules_to_test:
- modules_to_test[module].targets = clean_nested_paths(modules_to_test[module].targets)
+ modules_to_test[module].test_targets = clean_nested_paths(modules_to_test[module].test_targets)
if (
- len(modules_to_test[module].targets) >= WINDOWS_MAX_PACKAGES_NUMBER
+ len(modules_to_test[module].test_targets) >= WINDOWS_MAX_PACKAGES_NUMBER
): # With more packages we can reach the limit of the command line length on Windows
- modules_to_test[module].targets = DEFAULT_MODULES[module].targets
+ modules_to_test[module].test_targets = get_default_modules()[module].test_targets
print("Running tests for the following modules:")
for module in modules_to_test:
- print(f"- {module}: {modules_to_test[module].targets}")
+ print(f"- {module}: {modules_to_test[module].test_targets}")
return list(modules_to_test.values())
@@ -666,7 +667,7 @@ def get_impacted_packages(ctx, build_tags=None):
# Some files like tasks/gotest.py should trigger all tests
if should_run_all_tests(ctx, TRIGGER_ALL_TESTS_PATHS):
print(f"Triggering all tests because a file matching one of the {TRIGGER_ALL_TESTS_PATHS} was modified")
- return DEFAULT_MODULES.values()
+ return get_default_modules().values()
modified_packages = {f"github.com/DataDog/datadog-agent/{os.path.dirname(file)}" for file in files}
@@ -698,7 +699,7 @@ def create_dependencies(ctx, build_tags=None):
if build_tags is None:
build_tags = []
modules_deps = defaultdict(set)
- for modules in DEFAULT_MODULES:
+ for modules in get_default_modules():
with ctx.cd(modules):
res = ctx.run(
'go list '
@@ -751,12 +752,12 @@ def format_packages(ctx: Context, impacted_packages: set[str], build_tags: list[
module_path = get_go_module(package)
# Check if the module is in the target list of the modules we want to test
- if module_path not in DEFAULT_MODULES or not DEFAULT_MODULES[module_path].condition():
+ if module_path not in get_default_modules() or not get_default_modules()[module_path].should_test():
continue
# Check if the package is in the target list of the module we want to test
targeted = False
- for target in DEFAULT_MODULES[module_path].targets:
+ for target in get_default_modules()[module_path].test_targets:
if normpath(os.path.join(module_path, target)) in package:
targeted = True
break
@@ -770,25 +771,28 @@ def format_packages(ctx: Context, impacted_packages: set[str], build_tags: list[
relative_target = "./" + os.path.relpath(package, module_path).replace("\\", "/")
if module_path in modules_to_test:
- if modules_to_test[module_path].targets is not None and package not in modules_to_test[module_path].targets:
- modules_to_test[module_path].targets.append(relative_target)
+ if (
+ modules_to_test[module_path].test_targets is not None
+ and package not in modules_to_test[module_path].test_targets
+ ):
+ modules_to_test[module_path].test_targets.append(relative_target)
else:
- modules_to_test[module_path] = GoModule(module_path, targets=[relative_target])
+ modules_to_test[module_path] = GoModule(module_path, test_targets=[relative_target])
# Clean up duplicated paths to reduce Go test cmd length
for module in modules_to_test:
- modules_to_test[module].targets = clean_nested_paths(modules_to_test[module].targets)
+ modules_to_test[module].test_targets = clean_nested_paths(modules_to_test[module].test_targets)
if (
- len(modules_to_test[module].targets) >= WINDOWS_MAX_PACKAGES_NUMBER
+ len(modules_to_test[module].test_targets) >= WINDOWS_MAX_PACKAGES_NUMBER
): # With more packages we can reach the limit of the command line length on Windows
- modules_to_test[module].targets = DEFAULT_MODULES[module].targets
+ modules_to_test[module].test_targets = get_default_modules()[module].test_targets
module_to_remove = []
# Clean up to avoid running tests on package with no Go files matching build tags
for module in modules_to_test:
with ctx.cd(module):
res = ctx.run(
- f'go list -tags "{" ".join(build_tags)}" {" ".join([normpath(os.path.join("github.com/DataDog/datadog-agent", module, target)) for target in modules_to_test[module].targets])}',
+ f'go list -tags "{" ".join(build_tags)}" {" ".join([normpath(os.path.join("github.com/DataDog/datadog-agent", module, target)) for target in modules_to_test[module].test_targets])}',
hide=True,
warn=True,
)
@@ -798,8 +802,8 @@ def format_packages(ctx: Context, impacted_packages: set[str], build_tags: list[
package.split(" ")[1].strip(":").replace("github.com/DataDog/datadog-agent/", ""), module
).replace("\\", "/")
try:
- modules_to_test[module].targets.remove(f"./{package_to_remove}")
- if len(modules_to_test[module].targets) == 0:
+ modules_to_test[module].test_targets.remove(f"./{package_to_remove}")
+ if len(modules_to_test[module].test_targets) == 0:
module_to_remove.append(module)
except Exception:
print("Could not remove ", package_to_remove, ", ignoring...")
@@ -808,7 +812,7 @@ def format_packages(ctx: Context, impacted_packages: set[str], build_tags: list[
print("Running tests for the following modules:")
for module in modules_to_test:
- print(f"- {module}: {modules_to_test[module].targets}")
+ print(f"- {module}: {modules_to_test[module].test_targets}")
return modules_to_test.values()
@@ -903,7 +907,7 @@ def check_otel_module_versions(ctx):
raise Exit(f"Error parsing upstream go.mod version: {OTEL_UPSTREAM_GO_MOD_PATH}")
upstream_version = matches[0]
- for path, module in DEFAULT_MODULES.items():
+ for path, module in get_default_modules().items():
if module.used_by_otel:
mod_file = f"./{path}/go.mod"
with open(mod_file, newline='', encoding='utf-8') as reader:
diff --git a/tasks/kernel_matrix_testing/compiler.py b/tasks/kernel_matrix_testing/compiler.py
index 765e475bf0eba..c9fc615f5c81d 100644
--- a/tasks/kernel_matrix_testing/compiler.py
+++ b/tasks/kernel_matrix_testing/compiler.py
@@ -185,9 +185,9 @@ def start(self) -> None:
self.exec("echo conda activate ddpy3 >> /home/compiler/.bashrc", user="compiler")
self.exec(f"install -d -m 0777 -o {uid} -g {uid} /go", user="root")
- # Install all requirements except for libvirt ones (they won't build in the compiler and are not needed)
+ # Install requirements only for building in CI (not libvirt)
self.exec(
- f"cat {CONTAINER_AGENT_PATH}/tasks/kernel_matrix_testing/requirements.txt | grep -v libvirt | xargs pip install ",
+ f"pip install -r {CONTAINER_AGENT_PATH}/tasks/kernel_matrix_testing/requirements-ci.txt",
user="compiler",
)
diff --git a/tasks/kernel_matrix_testing/requirements-ci.txt b/tasks/kernel_matrix_testing/requirements-ci.txt
new file mode 100644
index 0000000000000..e5c5cdb581820
--- /dev/null
+++ b/tasks/kernel_matrix_testing/requirements-ci.txt
@@ -0,0 +1,5 @@
+termcolor==2.5.0
+thefuzz==0.22.1
+python-Levenshtein==0.26.1
+tabulate[widechars]==0.9.0
+jinja2==3.0.3
diff --git a/tasks/kernel_matrix_testing/requirements.txt b/tasks/kernel_matrix_testing/requirements.txt
index 8eea83b19b07e..51c054caa8aac 100644
--- a/tasks/kernel_matrix_testing/requirements.txt
+++ b/tasks/kernel_matrix_testing/requirements.txt
@@ -1,6 +1,2 @@
-libvirt-python==9.2.0
-termcolor==2.3.0
-thefuzz==0.19.0
-python-Levenshtein==0.21.1
-tabulate[widechars]==0.9.0
-jinja2==3.0.3
+-r requirements-ci.txt
+libvirt-python==10.9.0
diff --git a/tasks/kmt.py b/tasks/kmt.py
index f41420fac79e7..12b0ac2ba1953 100644
--- a/tasks/kmt.py
+++ b/tasks/kmt.py
@@ -1326,7 +1326,7 @@ def build(
build_task = "build-sysprobe-binary" if component == "system-probe" else "build"
cc.exec(
- f"cd {CONTAINER_AGENT_PATH} && git config --global --add safe.directory {CONTAINER_AGENT_PATH} && inv {inv_echo} {component}.{build_task} --no-bundle --arch={arch_obj.name}",
+ f"cd {CONTAINER_AGENT_PATH} && git config --global --add safe.directory {CONTAINER_AGENT_PATH} && inv {inv_echo} {component}.{build_task} --arch={arch_obj.name}",
)
cc.exec(f"tar cf {CONTAINER_AGENT_PATH}/kmt-deps/{stack}/build-embedded-dir.tar {EMBEDDED_SHARE_DIR}")
diff --git a/tasks/libs/ciproviders/github_api.py b/tasks/libs/ciproviders/github_api.py
index 1a8689ace7e6a..2d61567f5016b 100644
--- a/tasks/libs/ciproviders/github_api.py
+++ b/tasks/libs/ciproviders/github_api.py
@@ -451,13 +451,13 @@ def get_user_query(login):
return query + string_var
-def create_release_pr(title, base_branch, target_branch, version, changelog_pr=False):
+def create_release_pr(title, base_branch, target_branch, version, changelog_pr=False, milestone=None):
print(color_message("Creating PR", "bold"))
github = GithubAPI(repository=GITHUB_REPO_NAME)
# Find milestone based on what the next final version is. If the milestone does not exist, fail.
- milestone_name = str(version)
+ milestone_name = milestone or str(version)
milestone = github.get_milestone_by_name(milestone_name)
@@ -491,7 +491,6 @@ def create_release_pr(title, base_branch, target_branch, version, changelog_pr=F
"qa/no-code-change",
"team/agent-delivery",
"team/agent-release-management",
- "category/release_operations",
]
if changelog_pr:
diff --git a/tasks/libs/common/constants.py b/tasks/libs/common/constants.py
index ee49a0ff07685..ff9dc7a285a42 100644
--- a/tasks/libs/common/constants.py
+++ b/tasks/libs/common/constants.py
@@ -9,6 +9,12 @@
ALLOWED_REPO_ALL_BRANCHES = ALLOWED_REPO_NON_NIGHTLY_BRANCHES.union(ALLOWED_REPO_NIGHTLY_BRANCHES)
AGENT_VERSION_CACHE_NAME = "agent-version.cache"
+# Metric Origin Constants:
+# https://github.com/DataDog/dd-source/blob/a060ce7a403c2215c44ebfbcc588e42cd9985aeb/domains/metrics/shared/libs/proto/origin/origin.proto#L144
+ORIGIN_PRODUCT = 17
+ORIGIN_CATEGORY = 29
+ORIGIN_SERVICE = 0
+
# Message templates for releasing tasks
# Defined here either because they're long and would make the code less legible,
# or because they're used multiple times.
diff --git a/tasks/libs/common/datadog_api.py b/tasks/libs/common/datadog_api.py
index e3bf61ac9f78d..55b888678b768 100644
--- a/tasks/libs/common/datadog_api.py
+++ b/tasks/libs/common/datadog_api.py
@@ -3,15 +3,21 @@
from invoke.exceptions import Exit
-def create_metric(metric_type, metric_name, timestamp, value, tags, unit=None):
+def create_metric(metric_type, metric_name, timestamp, value, tags, unit=None, metric_origin=None):
"""
- metric_type: See types in the following documentation https://datadoghq.dev/datadog-api-client-python/datadog_api_client.v2.model.html#module-datadog_api_client.v2.model.metric_intake_type
"""
from datadog_api_client.model_utils import unset
+ from datadog_api_client.v2.model.metric_metadata import MetricMetadata
+ from datadog_api_client.v2.model.metric_origin import MetricOrigin
from datadog_api_client.v2.model.metric_point import MetricPoint
from datadog_api_client.v2.model.metric_series import MetricSeries
unit = unit or unset
+ metadata = unset
+
+ if metric_origin:
+ metadata = MetricMetadata(origin=MetricOrigin(**metric_origin))
return MetricSeries(
metric=metric_name,
@@ -24,6 +30,7 @@ def create_metric(metric_type, metric_name, timestamp, value, tags, unit=None):
],
tags=tags,
unit=unit,
+ metadata=metadata,
)
@@ -33,10 +40,10 @@ def create_count(metric_name, timestamp, value, tags, unit=None):
return create_metric(MetricIntakeType.COUNT, metric_name, timestamp, value, tags, unit)
-def create_gauge(metric_name, timestamp, value, tags, unit=None):
+def create_gauge(metric_name, timestamp, value, tags, unit=None, metric_origin=None):
from datadog_api_client.v2.model.metric_intake_type import MetricIntakeType
- return create_metric(MetricIntakeType.GAUGE, metric_name, timestamp, value, tags, unit)
+ return create_metric(MetricIntakeType.GAUGE, metric_name, timestamp, value, tags, unit, metric_origin)
def send_metrics(series):
diff --git a/tasks/libs/common/git.py b/tasks/libs/common/git.py
index 61bac36427b21..8c7b9d9e2f080 100644
--- a/tasks/libs/common/git.py
+++ b/tasks/libs/common/git.py
@@ -75,8 +75,9 @@ def get_file_modifications(
line.split('\t')
for line in ctx.run(f"git diff --name-status {flags} {last_main_commit}", hide=True).stdout.splitlines()
]
-
if added or modified or removed:
+ # skip when a file is renamed
+ modifications = [m for m in modifications if len(m) != 3]
modifications = [
(status, file)
for status, file in modifications
diff --git a/tasks/libs/common/gomodules.py b/tasks/libs/common/gomodules.py
new file mode 100644
index 0000000000000..ffbdd9f03d10d
--- /dev/null
+++ b/tasks/libs/common/gomodules.py
@@ -0,0 +1,348 @@
+"""Provides functions to import / export go modules from / to yaml files."""
+
+from __future__ import annotations
+
+import os
+import subprocess
+import sys
+from collections.abc import Callable
+from dataclasses import dataclass
+from functools import lru_cache
+from pathlib import Path
+from typing import ClassVar
+
+import yaml
+
+import tasks
+
+
+class ConfigDumper(yaml.SafeDumper):
+ """SafeDumper that ignores aliases. (no references for readability)"""
+
+ def ignore_aliases(self, _): # noqa
+ return True
+
+
+@dataclass
+class Configuration:
+ """Represents the top level configuration of the modules."""
+
+ FILE_NAME: ClassVar[str] = 'modules.yml'
+ INFO_COMMENT: ClassVar[str] = """
+# This file contains the go modules configuration.
+# See {file} for more information.
+"""
+
+ # Where this file has been loaded from
+ base_dir: Path
+ # All GoModule to be taken into account (module.path: module)
+ modules: dict[str, GoModule]
+ # Name of each ignored module (not within `modules`)
+ ignored_modules: set[str]
+
+ @staticmethod
+ def from_dict(data: dict[str, dict[str, object]], base_dir: Path | None = None) -> Configuration:
+ base_dir = base_dir or Path.cwd()
+
+ modules = {}
+ ignored_modules = set()
+
+ for name, module_data in data.get('modules', {}).items():
+ if module_data == 'ignored':
+ ignored_modules.add(name)
+ elif module_data == 'default':
+ modules[name] = GoModule.from_dict(name, {})
+ else:
+ modules[name] = GoModule.from_dict(name, module_data)
+
+ return Configuration(base_dir, modules, ignored_modules)
+
+ @classmethod
+ def from_file(cls, base_dir: Path | None = None) -> Configuration:
+ """Load the configuration from a yaml file."""
+
+ base_dir = base_dir or Path.cwd()
+
+ with open(base_dir / cls.FILE_NAME) as file:
+ data = yaml.safe_load(file)
+
+ return Configuration.from_dict(data)
+
+ def to_dict(self) -> dict[str, object]:
+ modules_config = {}
+ # Path removed because the key is the path
+ modules_config.update(
+ {name: module.to_dict(remove_path=True) or 'default' for name, module in self.modules.items()}
+ )
+ modules_config.update({module: 'ignored' for module in self.ignored_modules})
+
+ return {
+ 'modules': modules_config,
+ }
+
+ def to_file(self):
+        """Save the configuration to the yaml file at `base_dir / FILE_NAME`."""
+
+ with open(self.base_dir / self.FILE_NAME, "w") as file:
+ path = f'tasks/{Path(__file__).relative_to(Path(tasks.__file__).parent).as_posix()}'
+ print(self.INFO_COMMENT.format(file=path).strip() + '\n', file=file)
+
+ yaml.dump(self.to_dict(), file, Dumper=ConfigDumper)
+
+
+@dataclass
+class GoModule:
+ """A Go module abstraction.
+
+ See:
+        Documentation can be found in the repository documentation.
+
+ Args:
+ test_targets: Directories to unit test.
+        should_test_condition: When to execute tests, must be an enumerated field of `GoModule.SHOULD_TEST_CONDITIONS`.
+ should_tag: Whether this module should be tagged or not.
+        importable: HACK: Workaround for modules that can be tested, but not imported (eg. gohai), because they define a main package. A better solution would be to automatically detect if a module contains a main package, at the cost of spending some time parsing the module.
+        independent: Specifies whether this module is supposed to exist independently of the datadog-agent module. If True, a check will run to ensure this is true.
+ lint_targets: Directories to lint.
+ used_by_otel: Whether the module is an otel dependency or not.
+
+ Usage:
+ A module is defined within the modules.yml file containing the following fields by default (these can be omitted if the default value is used):
+ > should_test_condition: always
+ > importable: true
+ > independent: true
+ > lint_targets:
+ > - .
+ > should_tag: true
+ > test_targets:
+ > - .
+ > used_by_otel: false
+
+ If a module has default attributes, it should be defined like this:
+ > my/module: default
+
+ If a module should be ignored and not included within get_default_modules(), it should be defined like this:
+ > my/module: ignored
+ """
+
+ # Possible conditions for GoModule.should_test_condition
+ SHOULD_TEST_CONDITIONS: ClassVar[dict[str, Callable]] = {
+ 'always': lambda: True,
+ 'never': lambda: False,
+ 'is_linux': lambda: sys.platform == "linux",
+ }
+
+ # Posix path of the module's directory
+ path: str
+ # Directories to unit test
+ test_targets: list[str] | None = None
+    # When to execute tests, must be an enumerated field of `GoModule.SHOULD_TEST_CONDITIONS`
+ should_test_condition: str = 'always'
+ # Whether this module should be tagged or not
+ should_tag: bool = True
+ # HACK: Workaround for modules that can be tested, but not imported (eg. gohai), because
+ # they define a main package
+ # A better solution would be to automatically detect if a module contains a main package,
+ # at the cost of spending some time parsing the module.
+ importable: bool = True
+    # Whether this module is supposed to exist independently of the datadog-agent module. If True, a check will run to ensure this is true.
+ independent: bool = True
+ # Directories to lint
+ lint_targets: list[str] | None = None
+ # Whether the module is an otel dependency or not
+ used_by_otel: bool = False
+ # Used to load agent 6 modules from agent 7
+ legacy_go_mod_version: bool | None = None
+
+ @staticmethod
+ def from_dict(path: str, data: dict[str, object]) -> GoModule:
+ default = GoModule.get_default_attributes()
+
+ return GoModule(
+ path=path,
+ test_targets=data.get("test_targets", default["test_targets"]),
+ lint_targets=data.get("lint_targets", default["lint_targets"]),
+ should_test_condition=data.get("should_test_condition", default["should_test_condition"]),
+ should_tag=data.get("should_tag", default["should_tag"]),
+ importable=data.get("importable", default["importable"]),
+ independent=data.get("independent", default["independent"]),
+ used_by_otel=data.get("used_by_otel", default["used_by_otel"]),
+ legacy_go_mod_version=data.get("legacy_go_mod_version", default["legacy_go_mod_version"]),
+ )
+
+ @staticmethod
+ def get_default_attributes() -> dict[str, object]:
+ attrs = GoModule('.').to_dict(remove_defaults=False)
+ attrs.pop('path')
+
+ return attrs
+
+ def __post_init__(self):
+ self.test_targets = self.test_targets or ["."]
+ self.lint_targets = self.lint_targets or self.test_targets
+
+ self._dependencies = None
+
+ def to_dict(self, remove_defaults=True, remove_path=False) -> dict[str, object]:
+ """Convert to dictionary.
+
+ Args:
+ remove_defaults: Remove default values from the dictionary.
+ remove_path: Remove the path from the dictionary.
+ """
+
+ attrs = {
+ "path": self.path,
+ "test_targets": self.test_targets,
+ "lint_targets": self.lint_targets,
+ "should_test_condition": self.should_test_condition,
+ "should_tag": self.should_tag,
+ "importable": self.importable,
+ "independent": self.independent,
+ "used_by_otel": self.used_by_otel,
+ "legacy_go_mod_version": self.legacy_go_mod_version,
+ }
+
+ if remove_path:
+ del attrs['path']
+
+ if remove_defaults:
+ default_attrs = GoModule.get_default_attributes()
+
+ for key, value in default_attrs.items():
+ if key in attrs and attrs[key] == value:
+ del attrs[key]
+
+ return attrs
+
+ def should_test(self) -> bool:
+ """Verify that the module test condition is met from should_test_condition."""
+
+ function = GoModule.SHOULD_TEST_CONDITIONS[self.should_test_condition]
+
+ return function()
+
+ def __version(self, agent_version):
+ """Return the module version for a given Agent version.
+ >>> mods = [GoModule("."), GoModule("pkg/util/log")]
+ >>> [mod.__version("7.27.0") for mod in mods]
+ ["v7.27.0", "v0.27.0"]
+ """
+ if self.path == ".":
+ return "v" + agent_version
+
+ return "v0" + agent_version[1:]
+
+ def __compute_dependencies(self):
+ """
+ Computes the list of github.com/DataDog/datadog-agent/ dependencies of the module.
+ """
+ base_path = os.getcwd()
+ mod_parser_path = os.path.join(base_path, "internal", "tools", "modparser")
+
+ if not os.path.isdir(mod_parser_path):
+ raise Exception(f"Cannot find go.mod parser in {mod_parser_path}")
+
+ try:
+ output = subprocess.check_output(
+ ["go", "run", ".", "-path", os.path.join(base_path, self.path), "-prefix", AGENT_MODULE_PATH_PREFIX],
+ cwd=mod_parser_path,
+ ).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ print(f"Error while calling go.mod parser: {e.output}")
+ raise e
+
+ # Remove github.com/DataDog/datadog-agent/ from each line
+ return [line[len(AGENT_MODULE_PATH_PREFIX) :] for line in output.strip().splitlines()]
+
+ # FIXME: Change when Agent 6 and Agent 7 releases are decoupled
+ def tag(self, agent_version):
+ """Return the module tag name for a given Agent version.
+ >>> mods = [GoModule("."), GoModule("pkg/util/log")]
+ >>> [mod.tag("7.27.0") for mod in mods]
+ [["6.27.0", "7.27.0"], ["pkg/util/log/v0.27.0"]]
+ """
+ if self.path == ".":
+ return ["6" + agent_version[1:], "7" + agent_version[1:]]
+
+ return [f"{self.path}/{self.__version(agent_version)}"]
+
+ def full_path(self):
+ """Return the absolute path of the Go module."""
+ return os.path.abspath(self.path)
+
+ def go_mod_path(self):
+ """Return the absolute path of the Go module go.mod file."""
+ return self.full_path() + "/go.mod"
+
+ @property
+ def dependencies(self):
+ if not self._dependencies:
+ self._dependencies = self.__compute_dependencies()
+ return self._dependencies
+
+ @property
+ def import_path(self):
+ """Return the Go import path of the Go module
+ >>> mods = [GoModule("."), GoModule("pkg/util/log")]
+ >>> [mod.import_path for mod in mods]
+ ["github.com/DataDog/datadog-agent", "github.com/DataDog/datadog-agent/pkg/util/log"]
+ """
+ path = AGENT_MODULE_PATH_PREFIX.removesuffix('/')
+ if self.path != ".":
+ path += "/" + self.path
+ return path
+
+ def dependency_path(self, agent_version):
+ """Return the versioned dependency path of the Go module
+ >>> mods = [GoModule("."), GoModule("pkg/util/log")]
+ >>> [mod.dependency_path("7.27.0") for mod in mods]
+ ["github.com/DataDog/datadog-agent@v7.27.0", "github.com/DataDog/datadog-agent/pkg/util/log@v0.27.0"]
+ """
+ return f"{self.import_path}@{self.__version(agent_version)}"
+
+
+AGENT_MODULE_PATH_PREFIX = "github.com/DataDog/datadog-agent/"
+
+
+@lru_cache
+def get_default_modules(base_dir: Path | None = None) -> dict[str, GoModule]:
+ """Load the default modules from the modules.yml file.
+
+ Args:
+ base_dir: Root directory of the agent repository ('.' by default).
+ """
+
+ return Configuration.from_file(base_dir).modules
+
+
+def validate_module(
+ module: GoModule, attributes: str | dict[str, object], base_dir: Path, default_attributes: dict[str, object]
+):
+ """Lints a module."""
+
+ assert (base_dir / module.path / 'go.mod').is_file(), "Configuration is not next to a go.mod file"
+
+ if isinstance(attributes, str):
+ assert attributes in ('ignored', 'default'), f"Configuration has an unknown value: {attributes}"
+ return
+
+ # Verify attributes
+ assert set(default_attributes).issuperset(
+ attributes
+ ), f"Configuration contains unknown attributes ({set(attributes).difference(default_attributes)})"
+ for key, value in attributes.items():
+ assert (
+ attributes[key] != default_attributes[key]
+ ), f"Configuration has a default value which must be removed for {key}: {value}"
+
+ # Verify values
+ for target in module.test_targets:
+ assert (base_dir / module.path / target).is_dir(), f"Configuration has an unknown target: {target}"
+
+ for target in module.lint_targets:
+ assert (base_dir / module.path / target).is_dir(), f"Configuration has an unknown lint_target: {target}"
+
+ assert (
+ module.should_test_condition in GoModule.SHOULD_TEST_CONDITIONS
+ ), f"Configuration has an unknown should_test_condition: {module.should_test_condition}"
diff --git a/tasks/libs/common/junit_upload_core.py b/tasks/libs/common/junit_upload_core.py
index 5d2508e055bc6..54d01fc2af0a4 100644
--- a/tasks/libs/common/junit_upload_core.py
+++ b/tasks/libs/common/junit_upload_core.py
@@ -16,6 +16,7 @@
from invoke.exceptions import Exit
from tasks.flavor import AgentFlavor
+from tasks.libs.common.gomodules import get_default_modules
from tasks.libs.common.utils import gitlab_section
from tasks.libs.pipeline.notifications import (
DEFAULT_JIRA_PROJECT,
@@ -24,7 +25,6 @@
GITHUB_SLACK_MAP,
)
from tasks.libs.testing.flakes import get_tests_family, is_known_flaky_test
-from tasks.modules import DEFAULT_MODULES
E2E_INTERNAL_ERROR_STRING = "E2E INTERNAL ERROR"
CODEOWNERS_ORG_PREFIX = "@DataDog/"
@@ -132,7 +132,7 @@ def get_flaky_from_test_output():
return flaky_tests
# If the global test output file is not present, we look for module specific test output files
- for module in DEFAULT_MODULES:
+ for module in get_default_modules():
test_file = Path(module, MODULE_TEST_OUTPUT_FILE)
if test_file.is_file():
with test_file.open(encoding="utf8") as f:
diff --git a/tasks/libs/common/omnibus.py b/tasks/libs/common/omnibus.py
index 6809892feac7e..803400026082b 100644
--- a/tasks/libs/common/omnibus.py
+++ b/tasks/libs/common/omnibus.py
@@ -6,6 +6,8 @@
import requests
+from tasks.libs.common.constants import ORIGIN_CATEGORY, ORIGIN_PRODUCT, ORIGIN_SERVICE
+from tasks.libs.common.utils import get_metric_origin
from tasks.release import _get_release_json_value
@@ -267,6 +269,7 @@ def send_build_metrics(ctx, overall_duration):
],
'unit': 'seconds',
'type': 0,
+ "metadata": get_metric_origin(ORIGIN_PRODUCT, ORIGIN_CATEGORY, ORIGIN_SERVICE, True),
}
)
# We also provide the total duration for the omnibus build as a separate metric
@@ -281,6 +284,7 @@ def send_build_metrics(ctx, overall_duration):
],
'unit': 'seconds',
'type': 0,
+ "metadata": get_metric_origin(ORIGIN_PRODUCT, ORIGIN_CATEGORY, ORIGIN_SERVICE, True),
}
)
# Stripping might not always be enabled so we conditionally read the metric
@@ -296,6 +300,7 @@ def send_build_metrics(ctx, overall_duration):
],
'unit': 'seconds',
'type': 0,
+ "metadata": get_metric_origin(ORIGIN_PRODUCT, ORIGIN_CATEGORY, ORIGIN_SERVICE, True),
}
)
# And all packagers duration as another separated metric
@@ -312,6 +317,7 @@ def send_build_metrics(ctx, overall_duration):
],
'unit': 'seconds',
'type': 0,
+ "metadata": get_metric_origin(ORIGIN_PRODUCT, ORIGIN_CATEGORY, ORIGIN_SERVICE, True),
}
)
if sys.platform == 'win32':
diff --git a/tasks/libs/common/utils.py b/tasks/libs/common/utils.py
index 051ddb974496a..10882ddfeed7d 100644
--- a/tasks/libs/common/utils.py
+++ b/tasks/libs/common/utils.py
@@ -730,3 +730,22 @@ def wrapper(*args, **kwargs):
return wrapper
return decorator
+
+
+def get_metric_origin(origin_product, origin_sub_product, origin_product_detail, origin_field=False):
+ """
+ Returns a dictionary representing metric origin metadata.
+
+ When origin_field is True, wraps the origin data in an "origin" field,
+ for dictionary-based(unstructured) calls where the API expects the "origin" wrapper.
+ When origin_field is False, returns the origin data directly, suitable
+ for class-based(structured) calls that handle the wrapper internally.
+ """
+ metric_origin = {
+ "origin_product": origin_product,
+ "origin_sub_product": origin_sub_product,
+ "origin_product_detail": origin_product_detail,
+ }
+ if origin_field:
+ return {"origin": metric_origin}
+ return metric_origin
diff --git a/tasks/libs/package/size.py b/tasks/libs/package/size.py
index 8e8b5cfd6290d..5f564c91837f2 100644
--- a/tasks/libs/package/size.py
+++ b/tasks/libs/package/size.py
@@ -3,6 +3,8 @@
from datetime import datetime
from tasks.libs.common.color import color_message
+from tasks.libs.common.constants import ORIGIN_CATEGORY, ORIGIN_PRODUCT, ORIGIN_SERVICE
+from tasks.libs.common.utils import get_metric_origin
DEBIAN_OS = "debian"
CENTOS_OS = "centos"
@@ -104,7 +106,8 @@ def compute_package_size_metrics(
timestamp,
package_compressed_size,
tags=common_tags,
- )
+ metric_origin=get_metric_origin(ORIGIN_PRODUCT, ORIGIN_CATEGORY, ORIGIN_SERVICE),
+ ),
)
series.append(
create_gauge(
@@ -112,7 +115,8 @@ def compute_package_size_metrics(
timestamp,
package_uncompressed_size,
tags=common_tags,
- )
+ metric_origin=get_metric_origin(ORIGIN_PRODUCT, ORIGIN_CATEGORY, ORIGIN_SERVICE),
+ ),
)
for binary_name, binary_path in SCANNED_BINARIES[flavor].items():
@@ -123,7 +127,8 @@ def compute_package_size_metrics(
timestamp,
binary_size,
tags=common_tags + [f"bin:{binary_name}"],
- )
+ metric_origin=get_metric_origin(ORIGIN_PRODUCT, ORIGIN_CATEGORY, ORIGIN_SERVICE),
+ ),
)
return series
diff --git a/tasks/libs/pipeline/github_jira_map.yaml b/tasks/libs/pipeline/github_jira_map.yaml
index ac4e37e186a1e..d3262de27a247 100644
--- a/tasks/libs/pipeline/github_jira_map.yaml
+++ b/tasks/libs/pipeline/github_jira_map.yaml
@@ -40,6 +40,6 @@
'@datadog/agent-delivery': BARX
'@datadog/agent-devx-loops': ADXT
'@datadog/agent-devx-infra': ACIX
-'@datadog/apm-onboarding': APMON
+'@datadog/injection-platform': INPLAT
'@datadog/agent-processing-and-routing': APR
'@DataDog/container-ecosystems': CECO
diff --git a/tasks/modules.py b/tasks/modules.py
index 956be399697a7..9c1d8dbf24d71 100644
--- a/tasks/modules.py
+++ b/tasks/modules.py
@@ -1,317 +1,29 @@
+from __future__ import annotations
+
import json
import os
-import subprocess
import sys
+import tempfile
from collections import defaultdict
from contextlib import contextmanager
+from glob import glob
from pathlib import Path
+import yaml
from invoke import Context, Exit, task
from tasks.libs.common.color import Color, color_message
+from tasks.libs.common.gomodules import (
+ ConfigDumper,
+ Configuration,
+ GoModule,
+ get_default_modules,
+ validate_module,
+)
AGENT_MODULE_PATH_PREFIX = "github.com/DataDog/datadog-agent/"
-class GoModule:
- """
- A Go module abstraction.
- independent specifies whether this modules is supposed to exist independently of the datadog-agent module.
- If True, a check will run to ensure this is true.
- """
-
- def __init__(
- self,
- path,
- targets=None,
- condition=lambda: True,
- should_tag=True,
- importable=True,
- independent=False,
- lint_targets=None,
- used_by_otel=False,
- ):
- self.path = path
- self.targets = targets if targets else ["."]
- self.lint_targets = lint_targets if lint_targets else self.targets
- self.condition = condition
- self.should_tag = should_tag
- # HACK: Workaround for modules that can be tested, but not imported (eg. gohai), because
- # they define a main package
- # A better solution would be to automatically detect if a module contains a main package,
- # at the cost of spending some time parsing the module.
- self.importable = importable
- self.independent = independent
- self.used_by_otel = used_by_otel
-
- self._dependencies = None
-
- def __version(self, agent_version):
- """Return the module version for a given Agent version.
- >>> mods = [GoModule("."), GoModule("pkg/util/log")]
- >>> [mod.__version("7.27.0") for mod in mods]
- ["v7.27.0", "v0.27.0"]
- """
- if self.path == ".":
- return "v" + agent_version
-
- return "v0" + agent_version[1:]
-
- def __compute_dependencies(self):
- """
- Computes the list of github.com/DataDog/datadog-agent/ dependencies of the module.
- """
- base_path = os.getcwd()
- mod_parser_path = os.path.join(base_path, "internal", "tools", "modparser")
-
- if not os.path.isdir(mod_parser_path):
- raise Exception(f"Cannot find go.mod parser in {mod_parser_path}")
-
- try:
- output = subprocess.check_output(
- ["go", "run", ".", "-path", os.path.join(base_path, self.path), "-prefix", AGENT_MODULE_PATH_PREFIX],
- cwd=mod_parser_path,
- ).decode("utf-8")
- except subprocess.CalledProcessError as e:
- print(f"Error while calling go.mod parser: {e.output}")
- raise e
-
- # Remove github.com/DataDog/datadog-agent/ from each line
- return [line[len(AGENT_MODULE_PATH_PREFIX) :] for line in output.strip().splitlines()]
-
- # FIXME: Change when Agent 6 and Agent 7 releases are decoupled
- def tag(self, agent_version):
- """Return the module tag name for a given Agent version.
- >>> mods = [GoModule("."), GoModule("pkg/util/log")]
- >>> [mod.tag("7.27.0") for mod in mods]
- [["6.27.0", "7.27.0"], ["pkg/util/log/v0.27.0"]]
- """
- if self.path == ".":
- return ["6" + agent_version[1:], "7" + agent_version[1:]]
-
- return [f"{self.path}/{self.__version(agent_version)}"]
-
- def full_path(self):
- """Return the absolute path of the Go module."""
- return os.path.abspath(self.path)
-
- def go_mod_path(self):
- """Return the absolute path of the Go module go.mod file."""
- return self.full_path() + "/go.mod"
-
- @property
- def dependencies(self):
- if not self._dependencies:
- self._dependencies = self.__compute_dependencies()
- return self._dependencies
-
- @property
- def import_path(self):
- """Return the Go import path of the Go module
- >>> mods = [GoModule("."), GoModule("pkg/util/log")]
- >>> [mod.import_path for mod in mods]
- ["github.com/DataDog/datadog-agent", "github.com/DataDog/datadog-agent/pkg/util/log"]
- """
- path = AGENT_MODULE_PATH_PREFIX.removesuffix('/')
- if self.path != ".":
- path += "/" + self.path
- return path
-
- def dependency_path(self, agent_version):
- """Return the versioned dependency path of the Go module
- >>> mods = [GoModule("."), GoModule("pkg/util/log")]
- >>> [mod.dependency_path("7.27.0") for mod in mods]
- ["github.com/DataDog/datadog-agent@v7.27.0", "github.com/DataDog/datadog-agent/pkg/util/log@v0.27.0"]
- """
- return f"{self.import_path}@{self.__version(agent_version)}"
-
-
-# Default Modules on which will run tests / linters. When `condition=lambda: False` is defined for a module, it will be skipped.
-DEFAULT_MODULES = {
- ".": GoModule(
- ".",
- targets=["./pkg", "./cmd", "./comp"],
- ),
- "pkg/util/defaultpaths": GoModule("pkg/util/defaultpaths", independent=True, used_by_otel=True),
- "comp/api/api/def": GoModule("comp/api/api/def", independent=True, used_by_otel=True),
- "comp/api/authtoken": GoModule("comp/api/authtoken", independent=True, used_by_otel=True),
- "comp/core/config": GoModule("comp/core/config", independent=True, used_by_otel=True),
- "comp/core/flare/builder": GoModule("comp/core/flare/builder", independent=True, used_by_otel=True),
- "comp/core/flare/types": GoModule("comp/core/flare/types", independent=True, used_by_otel=True),
- "comp/core/hostname/hostnameinterface": GoModule(
- "comp/core/hostname/hostnameinterface", independent=True, used_by_otel=True
- ),
- "comp/core/log/def": GoModule("comp/core/log/def", independent=True, used_by_otel=True),
- "comp/core/log/impl": GoModule("comp/core/log/impl", independent=True, used_by_otel=True),
- "comp/core/log/impl-trace": GoModule("comp/core/log/impl-trace", independent=True),
- "comp/core/log/mock": GoModule("comp/core/log/mock", independent=True, used_by_otel=True),
- "comp/core/secrets": GoModule("comp/core/secrets", independent=True, used_by_otel=True),
- "comp/core/status": GoModule("comp/core/status", independent=True, used_by_otel=True),
- "comp/core/status/statusimpl": GoModule("comp/core/status/statusimpl", independent=True),
- "comp/core/tagger/types": GoModule("comp/core/tagger/types", independent=True, used_by_otel=True),
- "comp/core/tagger/utils": GoModule("comp/core/tagger/utils", independent=True, used_by_otel=True),
- "comp/core/telemetry": GoModule("comp/core/telemetry", independent=True, used_by_otel=True),
- "comp/def": GoModule("comp/def", independent=True, used_by_otel=True),
- "comp/forwarder/defaultforwarder": GoModule("comp/forwarder/defaultforwarder", independent=True, used_by_otel=True),
- "comp/forwarder/orchestrator/orchestratorinterface": GoModule(
- "comp/forwarder/orchestrator/orchestratorinterface", independent=True, used_by_otel=True
- ),
- "comp/logs/agent/config": GoModule("comp/logs/agent/config", independent=True, used_by_otel=True),
- "comp/netflow/payload": GoModule("comp/netflow/payload", independent=True),
- "comp/otelcol/collector-contrib/def": GoModule(
- "comp/otelcol/collector-contrib/def", independent=True, used_by_otel=True
- ),
- "comp/otelcol/collector-contrib/impl": GoModule(
- "comp/otelcol/collector-contrib/impl", independent=True, used_by_otel=True
- ),
- "comp/otelcol/converter/def": GoModule("comp/otelcol/converter/def", independent=True, used_by_otel=True),
- "comp/otelcol/converter/impl": GoModule("comp/otelcol/converter/impl", independent=True, used_by_otel=True),
- "comp/otelcol/ddflareextension/def": GoModule(
- "comp/otelcol/ddflareextension/def", independent=True, used_by_otel=True
- ),
- "comp/otelcol/ddflareextension/impl": GoModule(
- "comp/otelcol/ddflareextension/impl", independent=True, used_by_otel=True
- ),
- "comp/otelcol/logsagentpipeline": GoModule("comp/otelcol/logsagentpipeline", independent=True, used_by_otel=True),
- "comp/otelcol/logsagentpipeline/logsagentpipelineimpl": GoModule(
- "comp/otelcol/logsagentpipeline/logsagentpipelineimpl", independent=True, used_by_otel=True
- ),
- "comp/otelcol/otlp/components/exporter/datadogexporter": GoModule(
- "comp/otelcol/otlp/components/exporter/datadogexporter", independent=True, used_by_otel=True
- ),
- "comp/otelcol/otlp/components/exporter/logsagentexporter": GoModule(
- "comp/otelcol/otlp/components/exporter/logsagentexporter", independent=True, used_by_otel=True
- ),
- "comp/otelcol/otlp/components/exporter/serializerexporter": GoModule(
- "comp/otelcol/otlp/components/exporter/serializerexporter", independent=True, used_by_otel=True
- ),
- "comp/otelcol/otlp/components/metricsclient": GoModule(
- "comp/otelcol/otlp/components/metricsclient", independent=True, used_by_otel=True
- ),
- "comp/otelcol/otlp/components/processor/infraattributesprocessor": GoModule(
- "comp/otelcol/otlp/components/processor/infraattributesprocessor", independent=True, used_by_otel=True
- ),
- "comp/otelcol/otlp/components/statsprocessor": GoModule(
- "comp/otelcol/otlp/components/statsprocessor", independent=True, used_by_otel=True
- ),
- "comp/otelcol/otlp/testutil": GoModule("comp/otelcol/otlp/testutil", independent=True, used_by_otel=True),
- "comp/serializer/compression": GoModule("comp/serializer/compression", independent=True, used_by_otel=True),
- "comp/trace/agent/def": GoModule("comp/trace/agent/def", independent=True, used_by_otel=True),
- "comp/trace/compression/def": GoModule("comp/trace/compression/def", independent=True, used_by_otel=True),
- "comp/trace/compression/impl-gzip": GoModule(
- "comp/trace/compression/impl-gzip", independent=True, used_by_otel=True
- ),
- "comp/trace/compression/impl-zstd": GoModule(
- "comp/trace/compression/impl-zstd", independent=True, used_by_otel=True
- ),
- "internal/tools": GoModule("internal/tools", condition=lambda: False, should_tag=False),
- "internal/tools/independent-lint": GoModule(
- "internal/tools/independent-lint", condition=lambda: False, should_tag=False
- ),
- "internal/tools/modformatter": GoModule("internal/tools/modformatter", condition=lambda: False, should_tag=False),
- "internal/tools/modparser": GoModule("internal/tools/modparser", condition=lambda: False, should_tag=False),
- "internal/tools/proto": GoModule("internal/tools/proto", condition=lambda: False, should_tag=False),
- "pkg/aggregator/ckey": GoModule("pkg/aggregator/ckey", independent=True, used_by_otel=True),
- "pkg/api": GoModule("pkg/api", independent=True, used_by_otel=True),
- "pkg/collector/check/defaults": GoModule("pkg/collector/check/defaults", independent=True, used_by_otel=True),
- "pkg/config/env": GoModule("pkg/config/env", independent=True, used_by_otel=True),
- "pkg/config/mock": GoModule("pkg/config/mock", independent=True, used_by_otel=True),
- "pkg/config/nodetreemodel": GoModule("pkg/config/nodetreemodel", independent=True, used_by_otel=True),
- "pkg/config/model": GoModule("pkg/config/model", independent=True, used_by_otel=True),
- "pkg/config/remote": GoModule("pkg/config/remote", independent=True),
- "pkg/config/setup": GoModule("pkg/config/setup", independent=True, used_by_otel=True),
- "pkg/config/teeconfig": GoModule("pkg/config/teeconfig", independent=True, used_by_otel=True),
- "pkg/config/structure": GoModule("pkg/config/structure", independent=True, used_by_otel=True),
- "pkg/config/utils": GoModule("pkg/config/utils", independent=True, used_by_otel=True),
- "pkg/errors": GoModule("pkg/errors", independent=True),
- "pkg/gohai": GoModule("pkg/gohai", independent=True, importable=False),
- "pkg/linters/components/pkgconfigusage": GoModule("pkg/linters/components/pkgconfigusage", should_tag=False),
- "pkg/logs/auditor": GoModule("pkg/logs/auditor", independent=True, used_by_otel=True),
- "pkg/logs/client": GoModule("pkg/logs/client", independent=True, used_by_otel=True),
- "pkg/logs/diagnostic": GoModule("pkg/logs/diagnostic", independent=True, used_by_otel=True),
- "pkg/logs/message": GoModule("pkg/logs/message", independent=True, used_by_otel=True),
- "pkg/logs/metrics": GoModule("pkg/logs/metrics", independent=True, used_by_otel=True),
- "pkg/logs/pipeline": GoModule("pkg/logs/pipeline", independent=True, used_by_otel=True),
- "pkg/logs/processor": GoModule("pkg/logs/processor", independent=True, used_by_otel=True),
- "pkg/logs/sds": GoModule("pkg/logs/sds", independent=True, used_by_otel=True),
- "pkg/logs/sender": GoModule("pkg/logs/sender", independent=True, used_by_otel=True),
- "pkg/logs/sources": GoModule("pkg/logs/sources", independent=True, used_by_otel=True),
- "pkg/logs/status/statusinterface": GoModule("pkg/logs/status/statusinterface", independent=True, used_by_otel=True),
- "pkg/logs/status/utils": GoModule("pkg/logs/status/utils", independent=True, used_by_otel=True),
- "pkg/logs/util/testutils": GoModule("pkg/logs/util/testutils", independent=True, used_by_otel=True),
- "pkg/metrics": GoModule("pkg/metrics", independent=True, used_by_otel=True),
- "pkg/networkdevice/profile": GoModule("pkg/networkdevice/profile", independent=True),
- "pkg/obfuscate": GoModule("pkg/obfuscate", independent=True, used_by_otel=True),
- "pkg/orchestrator/model": GoModule("pkg/orchestrator/model", independent=True, used_by_otel=True),
- "pkg/process/util/api": GoModule("pkg/process/util/api", independent=True, used_by_otel=True),
- "pkg/proto": GoModule("pkg/proto", independent=True, used_by_otel=True),
- "pkg/remoteconfig/state": GoModule("pkg/remoteconfig/state", independent=True, used_by_otel=True),
- "pkg/security/secl": GoModule("pkg/security/secl", independent=True),
- "pkg/security/seclwin": GoModule("pkg/security/seclwin", independent=True, condition=lambda: False),
- "pkg/serializer": GoModule("pkg/serializer", independent=True, used_by_otel=True),
- "pkg/status/health": GoModule("pkg/status/health", independent=True, used_by_otel=True),
- "pkg/tagger/types": GoModule("pkg/tagger/types", independent=True, used_by_otel=True),
- "pkg/tagset": GoModule("pkg/tagset", independent=True, used_by_otel=True),
- "pkg/telemetry": GoModule("pkg/telemetry", independent=True, used_by_otel=True),
- "pkg/trace": GoModule("pkg/trace", independent=True, used_by_otel=True),
- "pkg/trace/stats/oteltest": GoModule("pkg/trace/stats/oteltest", independent=True, used_by_otel=True),
- "pkg/util/backoff": GoModule("pkg/util/backoff", independent=True, used_by_otel=True),
- "pkg/util/buf": GoModule("pkg/util/buf", independent=True, used_by_otel=True),
- "pkg/util/cache": GoModule("pkg/util/cache", independent=True),
- "pkg/util/cgroups": GoModule(
- "pkg/util/cgroups", independent=True, condition=lambda: sys.platform == "linux", used_by_otel=True
- ),
- "pkg/util/common": GoModule("pkg/util/common", independent=True, used_by_otel=True),
- "pkg/util/containers/image": GoModule("pkg/util/containers/image", independent=True, used_by_otel=True),
- "pkg/util/executable": GoModule("pkg/util/executable", independent=True, used_by_otel=True),
- "pkg/util/filesystem": GoModule("pkg/util/filesystem", independent=True, used_by_otel=True),
- "pkg/util/flavor": GoModule("pkg/util/flavor", independent=True),
- "pkg/util/fxutil": GoModule("pkg/util/fxutil", independent=True, used_by_otel=True),
- "pkg/util/grpc": GoModule("pkg/util/grpc", independent=True),
- "pkg/util/hostname/validate": GoModule("pkg/util/hostname/validate", independent=True, used_by_otel=True),
- "pkg/util/http": GoModule("pkg/util/http", independent=True, used_by_otel=True),
- "pkg/util/json": GoModule("pkg/util/json", independent=True, used_by_otel=True),
- "pkg/util/log": GoModule("pkg/util/log", independent=True, used_by_otel=True),
- "pkg/util/log/setup": GoModule("pkg/util/log/setup", independent=True, used_by_otel=True),
- "pkg/util/optional": GoModule("pkg/util/optional", independent=True, used_by_otel=True),
- "pkg/util/pointer": GoModule("pkg/util/pointer", independent=True, used_by_otel=True),
- "pkg/util/scrubber": GoModule("pkg/util/scrubber", independent=True, used_by_otel=True),
- "pkg/util/sort": GoModule("pkg/util/sort", independent=True, used_by_otel=True),
- "pkg/util/startstop": GoModule("pkg/util/startstop", independent=True, used_by_otel=True),
- "pkg/util/statstracker": GoModule("pkg/util/statstracker", independent=True, used_by_otel=True),
- "pkg/util/system": GoModule("pkg/util/system", independent=True, used_by_otel=True),
- "pkg/util/system/socket": GoModule("pkg/util/system/socket", independent=True, used_by_otel=True),
- "pkg/util/testutil": GoModule("pkg/util/testutil", independent=True, used_by_otel=True),
- "pkg/util/uuid": GoModule("pkg/util/uuid", independent=True),
- "pkg/util/winutil": GoModule("pkg/util/winutil", independent=True, used_by_otel=True),
- "pkg/version": GoModule("pkg/version", independent=True, used_by_otel=True),
- "test/fakeintake": GoModule("test/fakeintake", independent=True),
- "test/new-e2e": GoModule(
- "test/new-e2e",
- independent=True,
- targets=["./pkg/runner", "./pkg/utils/e2e/client"],
- lint_targets=[".", "./examples"], # need to explicitly list "examples", otherwise it is skipped
- ),
- "test/otel": GoModule("test/otel", independent=True, used_by_otel=True),
- "tools/retry_file_dump": GoModule("tools/retry_file_dump", condition=lambda: False, should_tag=False),
-}
-
-# Folder containing a `go.mod` file but that should not be added to the DEFAULT_MODULES
-IGNORED_MODULE_PATHS = [
- # Test files
- Path("./internal/tools/modparser/testdata/badformat"),
- Path("./internal/tools/modparser/testdata/match"),
- Path("./internal/tools/modparser/testdata/nomatch"),
- Path("./internal/tools/modparser/testdata/patchgoversion"),
- # This `go.mod` is a hack
- Path("./pkg/process/procutil/resources"),
- # We have test files in the tasks folder
- Path("./tasks"),
- # Test files
- Path("./test/integration/serverless/recorder-extension"),
- Path("./test/integration/serverless/src"),
-]
-
MAIN_TEMPLATE = """package main
import (
@@ -332,8 +44,8 @@ def generate_dummy_package(ctx, folder):
"""
try:
import_paths = []
- for mod in DEFAULT_MODULES.values():
- if mod.path != "." and mod.condition() and mod.importable:
+ for mod in get_default_modules().values():
+ if mod.path != "." and mod.should_test() and mod.importable:
import_paths.append(mod.import_path)
os.mkdir(folder)
@@ -346,7 +58,7 @@ def generate_dummy_package(ctx, folder):
print("Done")
ctx.run("go mod init example.com/testmodule")
- for mod in DEFAULT_MODULES.values():
+ for mod in get_default_modules().values():
if mod.path != ".":
ctx.run(f"go mod edit -require={mod.dependency_path('0.0.0')}")
ctx.run(f"go mod edit -replace {mod.import_path}=../{mod.path}")
@@ -374,7 +86,7 @@ def generate_dummy_package(ctx, folder):
@task
def go_work(_: Context):
"""
- Create a go.work file using the module list contained in DEFAULT_MODULES
+ Create a go.work file using the module list contained in get_default_modules()
and the go version contained in the file .go-version.
If there is already a go.work file, it is renamed go.work.backup and a warning is printed.
"""
@@ -399,8 +111,8 @@ def go_work(_: Context):
with open("go.work", "w") as f:
f.write(f"go {go_version}\n\nuse (\n")
- for mod in DEFAULT_MODULES.values():
- prefix = "" if mod.condition() else "//"
+ for mod in get_default_modules().values():
+ prefix = "" if mod.should_test() else "//"
f.write(f"\t{prefix}{mod.path}\n")
f.write(")\n")
@@ -422,15 +134,15 @@ def for_each(
use_targets_path and use_lint_targets_path
), "Only one of use_targets_path and use_lint_targets_path can be set"
- for mod in DEFAULT_MODULES.values():
+ for mod in get_default_modules().values():
if skip_untagged and not mod.should_tag:
continue
- if skip_condition and not mod.condition():
+ if skip_condition and not mod.should_test():
continue
targets = [mod.full_path()]
if use_targets_path:
- targets = [os.path.join(mod.full_path(), target) for target in mod.targets]
+ targets = [os.path.join(mod.full_path(), target) for target in mod.test_targets]
if use_lint_targets_path:
targets = [os.path.join(mod.full_path(), target) for target in mod.lint_targets]
@@ -443,28 +155,58 @@ def for_each(
@task
-def validate(_: Context):
+def validate(ctx: Context, base_dir='.', fix_format=False):
"""
- Test if every module was properly added in the DEFAULT_MODULES list.
+ Lints module configuration file.
+
+ Args:
+ fix_format: If True, will fix the format of the configuration files.
"""
- missing_modules: list[str] = []
- default_modules_paths = {Path(p) for p in DEFAULT_MODULES}
- # Find all go.mod files and make sure they are registered in DEFAULT_MODULES
- for root, dirs, files in os.walk("."):
- dirs[:] = [d for d in dirs if Path(root) / d not in IGNORED_MODULE_PATHS]
+ base_dir = Path(base_dir)
+ config = Configuration.from_file(base_dir)
+ default_attributes = GoModule.get_default_attributes()
+
+ # Verify format
+ with tempfile.TemporaryDirectory() as tmpdir:
+ config.base_dir = Path(tmpdir)
+ config.to_file()
+ config.base_dir = base_dir
+
+ if not ctx.run(
+ f'diff -u {base_dir / Configuration.FILE_NAME} {Path(tmpdir) / Configuration.FILE_NAME}',
+ warn=True,
+ ):
+ if fix_format:
+ print(f'{color_message("Info", Color.BLUE)}: Formatted module configuration file')
+ config.to_file()
+ else:
+ raise Exit(
+ f'{color_message("Error", Color.RED)}: Configuration file is not formatted correctly, use `invoke modules.validate --fix-format` to fix it'
+ )
- if "go.mod" in files and Path(root) not in default_modules_paths:
- missing_modules.append(root)
+ with open(base_dir / Configuration.FILE_NAME) as f:
+ config_attributes = yaml.safe_load(f)['modules']
- if missing_modules:
- message = f"{color_message('ERROR', Color.RED)}: some modules are missing from DEFAULT_MODULES\n"
- for module in missing_modules:
- message += f" {module} is missing from DEFAULT_MODULES\n"
+ config = Configuration.from_file(base_dir)
+ errors = []
+ for module in config.modules.values():
+ try:
+ validate_module(module, config_attributes[module.path], base_dir, default_attributes)
+ except AssertionError as e:
+ errors.append((module.path, e))
- message += "Please add them to the DEFAULT_MODULES list or exclude them from the validation."
+ # Backward check for go.mod (ensure there is a module for each go.mod)
+ for go_mod in glob(str(base_dir / '**/go.mod'), recursive=True):
+ path = Path(go_mod).parent.relative_to(base_dir).as_posix()
+ assert path in config.modules or path in config.ignored_modules, f"Configuration is missing a module for {path}"
- raise Exit(message)
+ if errors:
+ print(f'{color_message("ERROR", Color.RED)}: Some modules have invalid configurations:')
+ for path, error in sorted(errors):
+ print(f'- {color_message(path, Color.BOLD)}: {error}')
+
+ raise Exit(f'{color_message("ERROR", Color.RED)}: Found errors in module configurations, see details above')
@task
@@ -472,7 +214,7 @@ def validate_used_by_otel(ctx: Context):
"""
Verify whether indirect local dependencies of modules labeled "used_by_otel" are also marked with the "used_by_otel" tag.
"""
- otel_mods = [path for path, module in DEFAULT_MODULES.items() if module.used_by_otel]
+ otel_mods = [path for path, module in get_default_modules().items() if module.used_by_otel]
missing_used_by_otel_label: dict[str, list[str]] = defaultdict(list)
# for every module labeled as "used_by_otel"
@@ -495,13 +237,13 @@ def validate_used_by_otel(ctx: Context):
# we need the relative path of module (without github.com/DataDog/datadog-agent/ prefix)
rel_path = require['Path'].removeprefix("github.com/DataDog/datadog-agent/")
# check if indirect module is labeled as "used_by_otel"
- if rel_path not in DEFAULT_MODULES or not DEFAULT_MODULES[rel_path].used_by_otel:
+ if rel_path not in get_default_modules() or not get_default_modules()[rel_path].used_by_otel:
missing_used_by_otel_label[rel_path].append(otel_mod)
if missing_used_by_otel_label:
- message = f"{color_message('ERROR', Color.RED)}: some indirect local dependencies of modules labeled \"used_by_otel\" are not correctly labeled in DEFAULT_MODULES\n"
+ message = f"{color_message('ERROR', Color.RED)}: some indirect local dependencies of modules labeled \"used_by_otel\" are not correctly labeled in get_default_modules()\n"
for k, v in missing_used_by_otel_label.items():
message += f"\t{color_message(k, Color.RED)} is missing (used by {v})\n"
- message += "Please label them as \"used_by_otel\" in the DEFAULT_MODULES list."
+ message += "Please label them as \"used_by_otel\" in the get_default_modules() list."
raise Exit(message)
@@ -510,8 +252,49 @@ def get_module_by_path(path: Path) -> GoModule | None:
"""
Return the GoModule object corresponding to the given path.
"""
- for module in DEFAULT_MODULES.values():
+ for module in get_default_modules().values():
if Path(module.path) == path:
return module
return None
+
+
+@task
+def show(_, path: str, remove_defaults: bool = False, base_dir: str = '.'):
+ """Show the module information for the given path.
+
+ Args:
+ remove_defaults: If True, will remove default values from the output.
+ """
+
+ config = Configuration.from_file(Path(base_dir))
+ if path in config.ignored_modules:
+ print(f'Module {path} is ignored')
+ return
+
+ module = config.modules.get(path)
+
+ assert module, f'Module {path} not found'
+
+ yaml.dump(
+ {path: module.to_dict(remove_defaults=remove_defaults, remove_path=True)}, sys.stdout, Dumper=ConfigDumper
+ )
+
+
+@task
+def show_all(_, base_dir: str = '.', ignored=False):
+ """Show the list of modules.
+
+ Args:
+ ignored: If True, will list ignored modules.
+ """
+
+ config = Configuration.from_file(Path(base_dir))
+
+ if ignored:
+ names = config.ignored_modules
+ else:
+ names = list(config.modules.keys())
+
+ print('\n'.join(sorted(names)))
+ print(len(names), 'modules')
diff --git a/tasks/new_e2e_tests.py b/tasks/new_e2e_tests.py
index 338c816c28ab1..3bed59ad81164 100644
--- a/tasks/new_e2e_tests.py
+++ b/tasks/new_e2e_tests.py
@@ -22,8 +22,8 @@
from tasks.gotest import process_test_result, test_flavor
from tasks.libs.common.git import get_commit_sha
from tasks.libs.common.go import download_go_dependencies
+from tasks.libs.common.gomodules import get_default_modules
from tasks.libs.common.utils import REPO_PATH, color_message, running_in_ci
-from tasks.modules import DEFAULT_MODULES
from tasks.tools.e2e_stacks import destroy_remote_stack
@@ -76,10 +76,10 @@ def run(
1,
)
- e2e_module = DEFAULT_MODULES["test/new-e2e"]
- e2e_module.condition = lambda: True
+ e2e_module = get_default_modules()["test/new-e2e"]
+ e2e_module.should_test_condition = 'always'
if targets:
- e2e_module.targets = targets
+ e2e_module.test_targets = targets
env_vars = {}
if profile:
diff --git a/tasks/process_agent.py b/tasks/process_agent.py
index bc1ac4ed1d60a..6c3481d9c5213 100644
--- a/tasks/process_agent.py
+++ b/tasks/process_agent.py
@@ -6,7 +6,6 @@
from invoke import task
from invoke.exceptions import Exit
-from tasks.agent import build as agent_build
from tasks.build_tags import filter_incompatible_tags, get_build_tags, get_default_build_tags
from tasks.flavor import AgentFlavor
from tasks.libs.common.utils import REPO_PATH, bin_name, get_build_flags
@@ -28,21 +27,10 @@ def build(
incremental_build=False,
major_version='7',
go_mod="mod",
- bundle=True,
):
"""
Build the process agent
"""
- if bundle and sys.platform != "win32":
- return agent_build(
- ctx,
- race=race,
- build_include=build_include,
- build_exclude=build_exclude,
- flavor=flavor,
- major_version=major_version,
- go_mod=go_mod,
- )
flavor = AgentFlavor[flavor]
if flavor.is_ot():
diff --git a/tasks/release.py b/tasks/release.py
index bb7a232834f0b..a82deec90161f 100644
--- a/tasks/release.py
+++ b/tasks/release.py
@@ -32,6 +32,7 @@
get_last_release_tag,
try_git_command,
)
+from tasks.libs.common.gomodules import get_default_modules
from tasks.libs.common.user_interactions import yes_no_question
from tasks.libs.pipeline.notifications import (
DEFAULT_JIRA_PROJECT,
@@ -67,7 +68,6 @@
next_rc_version,
parse_major_versions,
)
-from tasks.modules import DEFAULT_MODULES
from tasks.pipeline import edit_schedule, run
from tasks.release_metrics.metrics import get_prs_metrics, get_release_lead_time
@@ -109,9 +109,9 @@ def update_modules(ctx, agent_version, verify=True):
if verify:
check_version(agent_version)
- for module in DEFAULT_MODULES.values():
+ for module in get_default_modules().values():
for dependency in module.dependencies:
- dependency_mod = DEFAULT_MODULES[dependency]
+ dependency_mod = get_default_modules()[dependency]
ctx.run(f"go mod edit -require={dependency_mod.dependency_path(agent_version)} {module.go_mod_path()}")
@@ -170,7 +170,7 @@ def tag_modules(ctx, agent_version, commit="HEAD", verify=True, push=True, force
check_version(agent_version)
force_option = __get_force_option(force)
- for module in DEFAULT_MODULES.values():
+ for module in get_default_modules().values():
# Skip main module; this is tagged at tag_version via __tag_single_module.
if module.should_tag and module.path != ".":
__tag_single_module(ctx, module, agent_version, commit, push, force_option, devel)
@@ -200,7 +200,7 @@ def tag_version(ctx, agent_version, commit="HEAD", verify=True, push=True, force
# Always tag the main module
force_option = __get_force_option(force)
- __tag_single_module(ctx, DEFAULT_MODULES["."], agent_version, commit, push, force_option, devel)
+ __tag_single_module(ctx, get_default_modules()["."], agent_version, commit, push, force_option, devel)
print(f"Created tags for version {agent_version}")
@@ -724,7 +724,7 @@ def create_release_branches(ctx, base_directory="~/dd", major_versions="6,7", up
def _update_last_stable(_, version, major_versions="7"):
"""
- Updates the last_release field(s) of release.json
+ Updates the last_release field(s) of release.json and returns the current milestone
"""
release_json = load_release_json()
list_major_versions = parse_major_versions(major_versions)
@@ -734,6 +734,8 @@ def _update_last_stable(_, version, major_versions="7"):
release_json['last_stable'][str(major)] = str(version)
_save_release_json(release_json)
+ return release_json["current_milestone"]
+
@task
def cleanup(ctx):
@@ -749,7 +751,36 @@ def cleanup(ctx):
if not match:
raise Exit(f'Unexpected version fetched from github {latest_release}', code=1)
version = _create_version_from_match(match)
- _update_last_stable(ctx, version)
+ current_milestone = _update_last_stable(ctx, version)
+
+ # create pull request to update last stable version
+ main_branch = "main"
+ cleanup_branch = f"release/{version}-cleanup"
+ ctx.run(f"git checkout -b {cleanup_branch}")
+ ctx.run("git add release.json")
+
+ commit_message = f"Update last_stable to {version}"
+ ok = try_git_command(ctx, f"git commit -m '{commit_message}'")
+ if not ok:
+ raise Exit(
+ color_message(
+ f"Could not create commit. Please commit manually with:\ngit commit -m {commit_message}\n, push the {cleanup_branch} branch and then open a PR against {main_branch}.",
+ "red",
+ ),
+ code=1,
+ )
+
+ if not ctx.run(f"git push --set-upstream origin {cleanup_branch}", warn=True):
+ raise Exit(
+ color_message(
+ f"Could not push branch {cleanup_branch} to the upstream 'origin'. Please push it manually and then open a PR against {main_branch}.",
+ "red",
+ ),
+ code=1,
+ )
+
+ create_release_pr(commit_message, main_branch, cleanup_branch, version, milestone=current_milestone)
+
edit_schedule(ctx, 2555, ref=version.branch())
diff --git a/tasks/security_agent.py b/tasks/security_agent.py
index a2c6c6473d361..ee6f65f2b82bc 100644
--- a/tasks/security_agent.py
+++ b/tasks/security_agent.py
@@ -13,7 +13,6 @@
from invoke.exceptions import Exit
from invoke.tasks import task
-from tasks.agent import build as agent_build
from tasks.agent import generate_config
from tasks.build_tags import get_default_build_tags
from tasks.go import run_golangci_lint
@@ -60,18 +59,10 @@ def build(
go_mod="mod",
skip_assets=False,
static=False,
- bundle=True,
):
"""
Build the security agent
"""
- if bundle and sys.platform != "win32":
- return agent_build(
- ctx,
- install_path=install_path,
- race=race,
- go_mod=go_mod,
- )
ldflags, gcflags, env = get_build_flags(ctx, major_version=major_version, static=static, install_path=install_path)
@@ -761,7 +752,7 @@ def go_generate_check(ctx):
tasks = [
[cws_go_generate],
[generate_cws_documentation],
- [gen_mocks],
+ # [gen_mocks], TODO: re-enable this when go is bumped to 1.23 and mocker is updated to >2.46.1
[sync_secl_win_pkg],
]
failing_tasks = []
@@ -865,10 +856,12 @@ def sync_secl_win_pkg(ctx):
("accessors_windows.go", "accessors_win.go"),
("legacy_secl.go", None),
("security_profile.go", None),
+ ("string_array_iter.go", None),
]
ctx.run("rm -r pkg/security/seclwin/model")
ctx.run("mkdir -p pkg/security/seclwin/model")
+ ctx.run("cp pkg/security/secl/doc.go pkg/security/seclwin/doc.go")
for ffrom, fto in files_to_copy:
if not fto:
diff --git a/tasks/system_probe.py b/tasks/system_probe.py
index 80ce5319eff59..2517cc71a2edd 100644
--- a/tasks/system_probe.py
+++ b/tasks/system_probe.py
@@ -19,10 +19,7 @@
from invoke.exceptions import Exit
from invoke.tasks import task
-from tasks.agent import BUNDLED_AGENTS
-from tasks.agent import build as agent_build
from tasks.build_tags import UNIT_TEST_TAGS, get_default_build_tags
-from tasks.flavor import AgentFlavor
from tasks.libs.build.ninja import NinjaWriter
from tasks.libs.common.color import color_message
from tasks.libs.common.git import get_commit_sha
@@ -475,7 +472,7 @@ def ninja_cgo_type_files(nw: NinjaWriter):
"pkg/network/ebpf/c/protocols/classification/defs.h",
],
"pkg/network/protocols/ebpf_types.go": [
- "pkg/network/ebpf/c/protocols/classification/defs.h",
+ "pkg/network/ebpf/c/protocols/postgres/types.h",
],
"pkg/network/protocols/http/gotls/go_tls_types.go": [
"pkg/network/ebpf/c/protocols/tls/go-tls-types.h",
@@ -685,7 +682,6 @@ def build(
strip_object_files=False,
strip_binary=False,
with_unit_test=False,
- bundle=True,
ebpf_compiler='clang',
static=False,
):
@@ -712,7 +708,6 @@ def build(
race=race,
incremental_build=incremental_build,
strip_binary=strip_binary,
- bundle=bundle,
arch=arch,
static=static,
)
@@ -740,19 +735,8 @@ def build_sysprobe_binary(
install_path=None,
bundle_ebpf=False,
strip_binary=False,
- bundle=True,
static=False,
) -> None:
- if bundle and not is_windows:
- return agent_build(
- ctx,
- race=race,
- major_version=major_version,
- go_mod=go_mod,
- bundle_ebpf=bundle_ebpf,
- bundle=BUNDLED_AGENTS[AgentFlavor.base] + ["system-probe"],
- )
-
arch_obj = Arch.from_str(arch)
ldflags, gcflags, env = get_build_flags(
diff --git a/tasks/test_core.py b/tasks/test_core.py
index 4cd2f73593a35..81cfebc843059 100644
--- a/tasks/test_core.py
+++ b/tasks/test_core.py
@@ -9,8 +9,9 @@
from tasks.flavor import AgentFlavor
from tasks.libs.civisibility import get_test_link_to_test_on_main
from tasks.libs.common.color import color_message
+from tasks.libs.common.gomodules import get_default_modules
from tasks.libs.common.utils import running_in_ci
-from tasks.modules import DEFAULT_MODULES, GoModule
+from tasks.modules import GoModule
class ModuleResult(abc.ABC):
@@ -145,9 +146,9 @@ def test_core(
if not skip_module_class:
module_result = module_class(path=module.full_path())
if not headless_mode:
- skipped_header = "[Skipped]" if not module.condition() else ""
+ skipped_header = "[Skipped]" if not module.should_test() else ""
print(f"----- {skipped_header} Module '{module.full_path()}'")
- if not module.condition():
+ if not module.should_test():
continue
command(modules_results, module, module_result)
@@ -180,15 +181,15 @@ def process_input_args(
# when this function is called from the command line, targets are passed
# as comma separated tokens in a string
if isinstance(input_targets, str):
- modules = [GoModule(input_module, targets=input_targets.split(','))]
+ modules = [GoModule(input_module, test_targets=input_targets.split(','))]
else:
- modules = [m for m in DEFAULT_MODULES.values() if m.path == input_module]
+ modules = [m for m in get_default_modules().values() if m.path == input_module]
elif isinstance(input_targets, str):
- modules = [GoModule(".", targets=input_targets.split(','))]
+ modules = [GoModule(".", test_targets=input_targets.split(','))]
else:
if not headless_mode:
print("Using default modules and targets")
- modules = DEFAULT_MODULES.values()
+ modules = get_default_modules().values()
flavor = AgentFlavor.base
if input_flavor:
diff --git a/tasks/trace_agent.py b/tasks/trace_agent.py
index c3718e53e4145..812116eeb3c32 100644
--- a/tasks/trace_agent.py
+++ b/tasks/trace_agent.py
@@ -3,7 +3,6 @@
from invoke import Exit, task
-from tasks.agent import build as agent_build
from tasks.build_tags import filter_incompatible_tags, get_build_tags, get_default_build_tags
from tasks.flavor import AgentFlavor
from tasks.libs.common.utils import REPO_PATH, bin_name, get_build_flags
@@ -23,23 +22,11 @@ def build(
install_path=None,
major_version='7',
go_mod="mod",
- bundle=False,
):
"""
Build the trace agent.
"""
- if bundle:
- return agent_build(
- ctx,
- race=race,
- build_include=build_include,
- build_exclude=build_exclude,
- flavor=flavor,
- major_version=major_version,
- go_mod=go_mod,
- )
-
flavor = AgentFlavor[flavor]
if flavor.is_ot():
flavor = AgentFlavor.base
diff --git a/tasks/unit_tests/modules_tests.py b/tasks/unit_tests/modules_tests.py
index ee5889b71f1b3..103fccf3b7dab 100644
--- a/tasks/unit_tests/modules_tests.py
+++ b/tasks/unit_tests/modules_tests.py
@@ -3,10 +3,17 @@
import json
import os
import subprocess
+import tempfile
import unittest
+from pathlib import Path
from typing import Any
-from tasks.modules import AGENT_MODULE_PATH_PREFIX, DEFAULT_MODULES
+from tasks.libs.common.gomodules import (
+ AGENT_MODULE_PATH_PREFIX,
+ Configuration,
+ GoModule,
+ get_default_modules,
+)
"""
Here is an abstract of the go.mod file format:
@@ -94,7 +101,7 @@ def get_agent_replaced(self, module: dict) -> set[str]:
def test_modules_replace_agent(self):
"""Ensure that all required datadog-agent modules are replaced"""
- for module_path in DEFAULT_MODULES.keys():
+ for module_path in get_default_modules().keys():
with self.subTest(module_path=module_path):
module = self.load_go_mod(module_path)
self.assertIsInstance(module, dict)
@@ -102,3 +109,216 @@ def test_modules_replace_agent(self):
replaced = self.get_agent_replaced(module)
required_not_replaced = required - replaced
self.assertEqual(required_not_replaced, set(), f"in module {module_path}")
+
+
+class TestGoModuleCondition(unittest.TestCase):
+ def test_always(self):
+ mod = GoModule(path='pkg/my/module', test_targets=['.'], lint_targets=['.'], should_test_condition='always')
+ self.assertTrue(mod.should_test())
+
+ def test_never(self):
+ mod = GoModule(path='pkg/my/module', test_targets=['.'], lint_targets=['.'], should_test_condition='never')
+ self.assertFalse(mod.should_test())
+
+ def test_error(self):
+ mod = GoModule(path='pkg/my/module', test_targets=['.'], lint_targets=['.'], should_test_condition='???')
+ self.assertRaises(KeyError, mod.should_test)
+
+
+class TestGoModuleSerialization(unittest.TestCase):
+ def test_to_dict(self):
+ module = GoModule(
+ path='pkg/my/module',
+ test_targets=['.'],
+ lint_targets=['.'],
+ should_test_condition='always',
+ should_tag=True,
+ importable=True,
+ independent=True,
+ used_by_otel=True,
+ )
+ d = module.to_dict(remove_defaults=False)
+ self.assertEqual(d['path'], module.path)
+ self.assertEqual(d['should_test_condition'], module.should_test_condition)
+ self.assertEqual(d['used_by_otel'], module.used_by_otel)
+
+ def test_to_dict_defaults(self):
+ module = GoModule(
+ path='pkg/my/module',
+ should_test_condition='never',
+ )
+ d = module.to_dict()
+
+ # Default values are not present
+ self.assertDictEqual(d, {'path': module.path, 'should_test_condition': module.should_test_condition})
+
+ def test_from_dict(self):
+ d = {
+ 'path': 'pkg/my/module',
+ 'test_targets': ['.'],
+ 'lint_targets': ['.'],
+ 'should_test_condition': 'always',
+ 'should_tag': True,
+ 'importable': True,
+ 'independent': True,
+ 'used_by_otel': True,
+ }
+ module = GoModule.from_dict(d['path'], d)
+
+ self.assertEqual(d['path'], module.path)
+ self.assertEqual(d['should_test_condition'], module.should_test_condition)
+ self.assertEqual(d['used_by_otel'], module.used_by_otel)
+
+ def test_from_dict_defaults(self):
+ mod = GoModule.from_dict('pkg/my/module', {})
+ mod2 = GoModule.from_dict('pkg/my/module', {'should_tag': True})
+ mod3 = GoModule.from_dict('pkg/my/module', {'should_tag': False})
+
+ self.assertEqual(mod.should_tag, True)
+ self.assertEqual(mod2.should_tag, True)
+ self.assertEqual(mod3.should_tag, False)
+
+ def test_from_to(self):
+ d = {
+ 'path': 'pkg/my/module',
+ 'test_targets': ['.'],
+ 'lint_targets': ['.'],
+ 'should_test_condition': 'always',
+ 'should_tag': True,
+ 'importable': True,
+ 'independent': True,
+ 'used_by_otel': True,
+ 'legacy_go_mod_version': None,
+ }
+ module = GoModule.from_dict(d['path'], d)
+ d2 = module.to_dict(remove_defaults=False)
+ self.assertDictEqual(d, d2)
+
+ module2 = GoModule.from_dict(d2['path'], d2)
+
+ self.assertEqual(module2.path, module.path)
+ self.assertEqual(module2.should_test_condition, module.should_test_condition)
+ self.assertEqual(module2.used_by_otel, module.used_by_otel)
+
+ def test_get_default_modules(self):
+ # Ensure modules are loaded
+ modules = get_default_modules()
+
+ self.assertGreater(len(modules), 0)
+
+ def test_ignored_modules(self):
+ # Ensure ignored modules are not loaded
+ config = Configuration.from_file()
+
+ # Ensure there are ignored modules
+ self.assertGreater(len(config.ignored_modules), 0)
+ self.assertGreater(len(config.modules), 0)
+ self.assertTrue(config.ignored_modules.isdisjoint(config.modules))
+
+ def test_get_default_modules_base(self):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ tmpdir = Path(tmpdir)
+ paths = ['pkg/my/module', 'utils/a', 'utils/b']
+ conditions = ['always', 'never', 'always']
+ used_by_otel = [True, False, False]
+
+ # Create modules
+ modules = {
+ path: GoModule(
+ path=path,
+ test_targets=['.'],
+ lint_targets=['.'],
+ should_test_condition=condition,
+ used_by_otel=used_by_otel,
+ )
+ for (path, condition, used_by_otel) in zip(paths, conditions, used_by_otel, strict=True)
+ }
+ Configuration(base_dir=tmpdir, modules=modules, ignored_modules=set()).to_file()
+
+ self.assertTrue((Path(tmpdir) / Configuration.FILE_NAME).is_file())
+
+ # Load modules
+ modules_loaded = get_default_modules(base_dir=Path(tmpdir))
+
+ self.assertDictEqual(modules, modules_loaded)
+
+
+class TestGoModuleConfiguration(unittest.TestCase):
+ def test_from(self):
+ config = {
+ 'modules': {
+ '.': {
+ 'test_targets': ['pkg/my/module'],
+ 'lint_targets': ['pkg/my/module'],
+ 'should_test_condition': 'always',
+ },
+ }
+ }
+ modules = Configuration.from_dict(config).modules
+
+ self.assertEqual(len(modules), 1)
+ self.assertEqual(modules['.'].should_test_condition, 'always')
+
+ def test_from_default(self):
+ config = {
+ 'modules': {
+ '.': {
+ 'test_targets': ['pkg/my/module'],
+ 'lint_targets': ['pkg/my/module'],
+ 'should_test_condition': 'always',
+ },
+ 'default': 'default',
+ }
+ }
+ modules = Configuration.from_dict(config).modules
+
+ self.assertEqual(len(modules), 2)
+ self.assertEqual(modules['default'].to_dict(), {'path': 'default'})
+ self.assertEqual(modules['default'].should_test_condition, GoModule('').should_test_condition)
+
+ def test_from_ignored(self):
+ config = {
+ 'modules': {
+ '.': {
+ 'test_targets': ['pkg/my/module'],
+ 'lint_targets': ['pkg/my/module'],
+ 'should_test_condition': 'always',
+ },
+ 'ignored': 'ignored',
+ }
+ }
+ c = Configuration.from_dict(config)
+
+ self.assertEqual(len(c.modules), 1)
+ self.assertEqual(c.ignored_modules, {'ignored'})
+
+ def test_to(self):
+ c = Configuration(
+ base_dir=Path.cwd(), modules={'mod': GoModule('mod', should_test_condition='never')}, ignored_modules=set()
+ )
+ config = c.to_dict()
+
+ self.assertEqual(len(config['modules']), 1)
+ self.assertDictEqual(config['modules']['mod'], {'should_test_condition': 'never'})
+
+ def test_to_default(self):
+ c = Configuration(
+ base_dir=Path.cwd(),
+ modules={'mod': GoModule('mod', should_test_condition='never'), 'default': GoModule('default')},
+ ignored_modules=set(),
+ )
+ config = c.to_dict()
+
+ self.assertEqual(len(config['modules']), 2)
+ self.assertEqual(config['modules']['default'], 'default')
+
+ def test_to_ignored(self):
+ c = Configuration(
+ base_dir=Path.cwd(),
+ modules={'mod': GoModule('mod', should_test_condition='never')},
+ ignored_modules={'ignored'},
+ )
+ config = c.to_dict()
+
+ self.assertEqual(len(config['modules']), 2)
+ self.assertEqual(config['modules']['ignored'], 'ignored')
diff --git a/tasks/unit_tests/testdata/collector/awscontainerinsightreceiver_manifest.yaml b/tasks/unit_tests/testdata/collector/awscontainerinsightreceiver_manifest.yaml
index 042501278b151..7eb5a9a5735fe 100644
--- a/tasks/unit_tests/testdata/collector/awscontainerinsightreceiver_manifest.yaml
+++ b/tasks/unit_tests/testdata/collector/awscontainerinsightreceiver_manifest.yaml
@@ -3,10 +3,10 @@ dist:
description: Manifest that contains awscontainerinsight receiver (should fail collector_tests.py)
extensions:
- - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
+ - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0
receivers:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0
diff --git a/tasks/unit_tests/testdata/collector/datadogconnector_manifest.yaml b/tasks/unit_tests/testdata/collector/datadogconnector_manifest.yaml
index 95e2acf3f3549..584fb4dc7f9e7 100644
--- a/tasks/unit_tests/testdata/collector/datadogconnector_manifest.yaml
+++ b/tasks/unit_tests/testdata/collector/datadogconnector_manifest.yaml
@@ -3,12 +3,12 @@ dist:
description: Manifest that contains datadog connector (should get stripped and pass collector_tests.py)
extensions:
- - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
+ - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0
receivers:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0
connectors:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.113.0
diff --git a/tasks/unit_tests/testdata/collector/datadogexporter_manifest.yaml b/tasks/unit_tests/testdata/collector/datadogexporter_manifest.yaml
index 78597716bdf05..49ea10f7cf539 100644
--- a/tasks/unit_tests/testdata/collector/datadogexporter_manifest.yaml
+++ b/tasks/unit_tests/testdata/collector/datadogexporter_manifest.yaml
@@ -3,12 +3,12 @@ dist:
description: Manifest that contains datadog exporter (should get stripped and pass collector_tests.py)
extensions:
- - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
+ - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0
exporters:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.113.0
receivers:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0
diff --git a/tasks/unit_tests/testdata/collector/healthcheckextension_manifest.yaml b/tasks/unit_tests/testdata/collector/healthcheckextension_manifest.yaml
index 7778fbe1331e1..8aa8584ca47b2 100644
--- a/tasks/unit_tests/testdata/collector/healthcheckextension_manifest.yaml
+++ b/tasks/unit_tests/testdata/collector/healthcheckextension_manifest.yaml
@@ -3,8 +3,8 @@ dist:
description: Manifest that does not contain health check extension (should fail collector_tests.py)
extensions:
- - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
+ - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0
receivers:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0
diff --git a/tasks/unit_tests/testdata/collector/mismatched_versions_manifest.yaml b/tasks/unit_tests/testdata/collector/mismatched_versions_manifest.yaml
index 8b5c6f09fd8c1..62c3266998ae0 100644
--- a/tasks/unit_tests/testdata/collector/mismatched_versions_manifest.yaml
+++ b/tasks/unit_tests/testdata/collector/mismatched_versions_manifest.yaml
@@ -1,12 +1,12 @@
---
dist:
description: Manifest that has mismatched otelcol and component versions (should fail collector_tests.py)
- otelcol_version: 0.111.0
+ otelcol_version: 0.113.0
extensions:
- gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.99.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0
receivers:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0
diff --git a/tasks/unit_tests/testdata/collector/pprofextension_manifest.yaml b/tasks/unit_tests/testdata/collector/pprofextension_manifest.yaml
index 09d8bc15e2a42..eab4b6d6902ee 100644
--- a/tasks/unit_tests/testdata/collector/pprofextension_manifest.yaml
+++ b/tasks/unit_tests/testdata/collector/pprofextension_manifest.yaml
@@ -3,8 +3,8 @@ dist:
description: Manifest that does not contain pprof extension (should fail collector_tests.py)
extensions:
- - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
+ - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0
receivers:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0
diff --git a/tasks/unit_tests/testdata/collector/prometheusreceiver_manifest.yaml b/tasks/unit_tests/testdata/collector/prometheusreceiver_manifest.yaml
index ab3ac59568354..2d937e177996b 100644
--- a/tasks/unit_tests/testdata/collector/prometheusreceiver_manifest.yaml
+++ b/tasks/unit_tests/testdata/collector/prometheusreceiver_manifest.yaml
@@ -3,6 +3,6 @@ dist:
description: Manifest that does not contain prometheus receiver (should fail collector_tests.py)
extensions:
- - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
+ - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0
diff --git a/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml b/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml
index 05e9df55b26ac..cf8f42c78f51a 100644
--- a/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml
+++ b/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml
@@ -3,55 +3,55 @@ dist:
module: github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib
name: otelcol-contrib
description: Valid (default) datadog converged Agent ocb manifest (should pass collector_tests.py)
- version: 0.111.0
+ version: 0.113.0
output_path: ./comp/otelcol/collector-contrib/impl
- otelcol_version: 0.111.0
+ otelcol_version: 0.113.0
extensions:
- - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.111.0
+ - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.113.0
exporters:
- - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.111.0
- - gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.111.0
- - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0
- - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.111.0
+ - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.113.0
+ - gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.113.0
+ - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0
+ - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.113.0
processors:
- - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.111.0
- - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0
+ - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.113.0
+ - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.113.0
receivers:
- - gomod: go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0
- - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.111.0
+ - gomod: go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0
+ - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.113.0
connectors:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.113.0
# When adding a replace, add a comment before it to document why it's needed and when it can be removed
replaces:
diff --git a/tasks/unit_tests/testdata/collector/valid_manifest_without_specified_version.yaml b/tasks/unit_tests/testdata/collector/valid_manifest_without_specified_version.yaml
index 0ab0595706c51..5586be9d9a182 100644
--- a/tasks/unit_tests/testdata/collector/valid_manifest_without_specified_version.yaml
+++ b/tasks/unit_tests/testdata/collector/valid_manifest_without_specified_version.yaml
@@ -6,12 +6,12 @@ dist:
output_path: ./comp/otelcol/collector-contrib/impl
extensions:
- - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
+ - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0
receivers:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0
# When adding a replace, add a comment before it to document why it's needed and when it can be removed
replaces:
diff --git a/tasks/unit_tests/testdata/collector/zpagesextension_manifest.yaml b/tasks/unit_tests/testdata/collector/zpagesextension_manifest.yaml
index 4f28627c08dbf..e1071d3257538 100644
--- a/tasks/unit_tests/testdata/collector/zpagesextension_manifest.yaml
+++ b/tasks/unit_tests/testdata/collector/zpagesextension_manifest.yaml
@@ -3,8 +3,8 @@ dist:
description: manifest without zpages extension (should fail collector_tests.py)
extensions:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0
receivers:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0
diff --git a/tasks/update_go.py b/tasks/update_go.py
index 29eec6ce4374c..a723653716559 100644
--- a/tasks/update_go.py
+++ b/tasks/update_go.py
@@ -10,7 +10,7 @@
from tasks.libs.ciproviders.circleci import update_circleci_config
from tasks.libs.ciproviders.gitlab_api import update_gitlab_config
from tasks.libs.common.color import color_message
-from tasks.modules import DEFAULT_MODULES
+from tasks.libs.common.gomodules import get_default_modules
GO_VERSION_FILE = "./.go-version"
@@ -191,7 +191,7 @@ def _update_references(warn: bool, version: str, dry_run: bool = False):
def _update_go_mods(warn: bool, version: str, include_otel_modules: bool, dry_run: bool = False):
- for path, module in DEFAULT_MODULES.items():
+ for path, module in get_default_modules().items():
if not include_otel_modules and module.used_by_otel:
# only update the go directives in go.mod files not used by otel
# to allow them to keep using the modules
diff --git a/test/integration/corechecks/docker/main_test.go b/test/integration/corechecks/docker/main_test.go
index 643e6f6eb6d7f..4bbaec0d908d5 100644
--- a/test/integration/corechecks/docker/main_test.go
+++ b/test/integration/corechecks/docker/main_test.go
@@ -21,8 +21,8 @@ import (
logdef "github.com/DataDog/datadog-agent/comp/core/log/def"
logfx "github.com/DataDog/datadog-agent/comp/core/log/fx"
"github.com/DataDog/datadog-agent/comp/core/secrets"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ taggerfx "github.com/DataDog/datadog-agent/comp/core/tagger/fx"
"github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
wmcatalog "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/catalog"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
@@ -136,8 +136,7 @@ func setup() (workloadmeta.Component, tagger.Component, error) {
logfx.Module(),
wmcatalog.GetCatalog(),
workloadmetafx.Module(workloadmeta.NewParams()),
- taggerimpl.Module(),
- fx.Supply(tagger.NewTaggerParams()),
+ taggerfx.Module(tagger.Params{}),
telemetryimpl.Module(),
))
store := deps.Store
diff --git a/test/integration/listeners/docker/docker_listener_test.go b/test/integration/listeners/docker/docker_listener_test.go
index 73cc531323f26..f13594d0f57d9 100644
--- a/test/integration/listeners/docker/docker_listener_test.go
+++ b/test/integration/listeners/docker/docker_listener_test.go
@@ -25,8 +25,8 @@ import (
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/listeners"
acTelemetry "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry"
compcfg "github.com/DataDog/datadog-agent/comp/core/config"
- "github.com/DataDog/datadog-agent/comp/core/tagger"
- "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl"
+ tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
+ taggerfx "github.com/DataDog/datadog-agent/comp/core/tagger/fx"
"github.com/DataDog/datadog-agent/comp/core/telemetry"
wmcatalog "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/catalog"
workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
@@ -82,6 +82,7 @@ func (suite *DockerListenerTestSuite) SetupSuite() {
}
var err error
+ env.SetFeatures(suite.T(), env.Docker)
deps := fxutil.Test[deps](suite.T(), fx.Options(
core.MockBundle(),
fx.Replace(compcfg.MockParams{
@@ -89,10 +90,8 @@ func (suite *DockerListenerTestSuite) SetupSuite() {
}),
wmcatalog.GetCatalog(),
workloadmetafx.Module(workloadmeta.NewParams()),
- taggerimpl.Module(),
- fx.Supply(tagger.NewTaggerParams()),
+ taggerfx.Module(tagger.Params{}),
))
- env.SetFeatures(suite.T(), env.Docker)
suite.wmeta = deps.WMeta
suite.telemetryStore = acTelemetry.NewStore(deps.Telemetry)
suite.dockerutil, err = docker.GetDockerUtil()
diff --git a/test/new-e2e/examples/gke_autopilot_test.go b/test/new-e2e/examples/gke_autopilot_test.go
new file mode 100644
index 0000000000000..9317ace5b67be
--- /dev/null
+++ b/test/new-e2e/examples/gke_autopilot_test.go
@@ -0,0 +1,52 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+package examples
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/gke"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ gcpkubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/gcp/kubernetes"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+)
+
+type gkeAutopilotSuite struct {
+ e2e.BaseSuite[environments.Kubernetes]
+}
+
+func TestGKEAutopilotSuite(t *testing.T) {
+ e2e.Run(t, &gkeAutopilotSuite{}, e2e.WithProvisioner(gcpkubernetes.GKEProvisioner(gcpkubernetes.WithGKEOptions(gke.WithAutopilot()), gcpkubernetes.WithAgentOptions(kubernetesagentparams.WithGKEAutopilot()))))
+}
+
+func (v *gkeAutopilotSuite) TestGKE() {
+ v.T().Log("Running GKE test")
+ res, _ := v.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(context.TODO(), v1.ListOptions{})
+ var clusterAgent corev1.Pod
+ containsClusterAgent := false
+ for _, pod := range res.Items {
+ if strings.Contains(pod.Name, "cluster-agent") {
+ containsClusterAgent = true
+ clusterAgent = pod
+ break
+ }
+ }
+ assert.True(v.T(), containsClusterAgent, "Cluster Agent not found")
+
+ stdout, stderr, err := v.Env().KubernetesCluster.KubernetesClient.
+ PodExec("datadog", clusterAgent.Name, "cluster-agent", []string{"ls"})
+ require.NoError(v.T(), err)
+ assert.Empty(v.T(), stderr)
+ assert.NotEmpty(v.T(), stdout)
+}
diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod
index b31bcd112bc3c..520e01a031069 100644
--- a/test/new-e2e/go.mod
+++ b/test/new-e2e/go.mod
@@ -60,7 +60,7 @@ require (
// `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version
// Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB
// => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB
- github.com/DataDog/test-infra-definitions v0.0.0-20241112113900-ec7e1e40abcd
+ github.com/DataDog/test-infra-definitions v0.0.0-20241114152759-b436617374bf
github.com/aws/aws-sdk-go-v2 v1.32.2
github.com/aws/aws-sdk-go-v2/config v1.27.40
github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2
@@ -269,7 +269,7 @@ require (
golang.org/x/tools v0.26.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/grpc v1.67.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
@@ -304,10 +304,10 @@ require (
github.com/pulumi/pulumi-azure-native-sdk/v2 v2.67.0 // indirect
github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1 // indirect
github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0 // indirect
- go.opentelemetry.io/collector/component v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
- go.opentelemetry.io/collector/extension v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata v1.17.0 // indirect
+ go.opentelemetry.io/collector/component v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata v1.19.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
)
diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum
index 2c57ce22c255d..d613f612bb153 100644
--- a/test/new-e2e/go.sum
+++ b/test/new-e2e/go.sum
@@ -16,8 +16,8 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo=
github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg=
-github.com/DataDog/test-infra-definitions v0.0.0-20241112113900-ec7e1e40abcd h1:S72kcHk/XqSv2Tn0KLc75jaKd8GG6mJIK/UYUW3RUlo=
-github.com/DataDog/test-infra-definitions v0.0.0-20241112113900-ec7e1e40abcd/go.mod h1:l0n0FQYdWWQxbI5a2EkuynRQIteUQcYOaOhdxD9TvJs=
+github.com/DataDog/test-infra-definitions v0.0.0-20241114152759-b436617374bf h1:wPI1Rnox8xn6I4BCZvKWarlwz8u/yZFt72Ylm82iJ/w=
+github.com/DataDog/test-infra-definitions v0.0.0-20241114152759-b436617374bf/go.mod h1:l0n0FQYdWWQxbI5a2EkuynRQIteUQcYOaOhdxD9TvJs=
github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A=
@@ -511,14 +511,14 @@ github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8
github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
github.com/zorkian/go-datadog-api v2.30.0+incompatible h1:R4ryGocppDqZZbnNc5EDR8xGWF/z/MxzWnqTUijDQes=
github.com/zorkian/go-datadog-api v2.30.0+incompatible/go.mod h1:PkXwHX9CUQa/FpB9ZwAD45N1uhCW4MT/Wj7m36PbKss=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
-go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8=
-go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/extension v0.113.0 h1:Vp/YSL8ZCkJQrP1lf2Bm5yaTvcp6ROO3AnfuSL3GEXM=
+go.opentelemetry.io/collector/extension v0.113.0/go.mod h1:Pwp0TNqdHeER4V1I6H6oCvrto/riiOAqs3737BWCnjw=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
@@ -700,8 +700,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
diff --git a/test/new-e2e/pkg/environments/aws/kubernetes/kind.go b/test/new-e2e/pkg/environments/aws/kubernetes/kind.go
index 58fce4c5a9132..53bc559b4db0e 100644
--- a/test/new-e2e/pkg/environments/aws/kubernetes/kind.go
+++ b/test/new-e2e/pkg/environments/aws/kubernetes/kind.go
@@ -88,7 +88,7 @@ func KindRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Prov
return err
}
- kindCluster, err := kubeComp.NewKindCluster(&awsEnv, host, awsEnv.CommonNamer().ResourceName("kind"), params.name, awsEnv.KubernetesVersion(), utils.PulumiDependsOn(installEcrCredsHelperCmd))
+ kindCluster, err := kubeComp.NewKindCluster(&awsEnv, host, params.name, awsEnv.KubernetesVersion(), utils.PulumiDependsOn(installEcrCredsHelperCmd))
if err != nil {
return err
}
diff --git a/test/new-e2e/pkg/environments/gcp/kubernetes/params.go b/test/new-e2e/pkg/environments/gcp/kubernetes/params.go
index d42a5dac75f9e..d3e2dc1eac0d1 100644
--- a/test/new-e2e/pkg/environments/gcp/kubernetes/params.go
+++ b/test/new-e2e/pkg/environments/gcp/kubernetes/params.go
@@ -8,6 +8,7 @@ package gcpkubernetes
import (
"fmt"
+
"github.com/DataDog/test-infra-definitions/scenarios/gcp/gke"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
diff --git a/test/new-e2e/pkg/environments/local/kubernetes/kind.go b/test/new-e2e/pkg/environments/local/kubernetes/kind.go
index b8f76751dcf48..90234eaaee38a 100644
--- a/test/new-e2e/pkg/environments/local/kubernetes/kind.go
+++ b/test/new-e2e/pkg/environments/local/kubernetes/kind.go
@@ -131,7 +131,7 @@ func KindRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Prov
return err
}
- kindCluster, err := kubeComp.NewLocalKindCluster(&localEnv, localEnv.CommonNamer().ResourceName("kind"), params.name, localEnv.KubernetesVersion())
+ kindCluster, err := kubeComp.NewLocalKindCluster(&localEnv, params.name, localEnv.KubernetesVersion())
if err != nil {
return err
}
diff --git a/test/new-e2e/pkg/runner/parameters/store_env.go b/test/new-e2e/pkg/runner/parameters/store_env.go
index 3c7f1e62f3b13..6e43128a2af69 100644
--- a/test/new-e2e/pkg/runner/parameters/store_env.go
+++ b/test/new-e2e/pkg/runner/parameters/store_env.go
@@ -34,6 +34,9 @@ var envVariablesByStoreKey = map[StoreKey]string{
PulumiLogToStdErr: "E2E_PULUMI_LOG_TO_STDERR",
PulumiVerboseProgressStreams: "E2E_PULUMI_VERBOSE_PROGRESS_STREAMS",
DevMode: "E2E_DEV_MODE",
+ InitOnly: "E2E_INIT_ONLY",
+ MajorVersion: "E2E_MAJOR_VERSION",
+ PreInitialized: "E2E_PRE_INITIALIZED",
}
type envValueStore struct {
diff --git a/test/new-e2e/system-probe/test-json-review/main.go b/test/new-e2e/system-probe/test-json-review/main.go
index cb3ed3962df33..6c93b75079a5d 100644
--- a/test/new-e2e/system-probe/test-json-review/main.go
+++ b/test/new-e2e/system-probe/test-json-review/main.go
@@ -195,20 +195,23 @@ func reviewTestsReaders(jf io.Reader, ff io.Reader, owners *testowners) (*review
}
// check if a subtest also failed and is marked as flaky
- hasFlakyChild := false
+ hasFlakyChild, hasFailedChild := false, false
for _, failedTest := range tests {
- if kf.IsFlaky(pkg, failedTest) && strings.HasPrefix(failedTest, test+"/") {
- hasFlakyChild = true
- break
+ if strings.HasPrefix(failedTest, test+"/") {
+ if kf.IsFlaky(pkg, failedTest) {
+ hasFlakyChild = true
+ } else {
+ hasFailedChild = true
+ }
}
}
- if hasFlakyChild {
+ // Only mark the parent as flaky if all children are flaky/passing; if any child failed for real, it's a real failure
+ if hasFlakyChild && !hasFailedChild {
flakyTestsOut.WriteString(addOwnerInformation(fmt.Sprintf(flakyFormat, pkg, test), owner))
- continue
+ } else {
+ failedTestsOut.WriteString(addOwnerInformation(fmt.Sprintf(failFormat, pkg, test), owner))
}
-
- failedTestsOut.WriteString(addOwnerInformation(fmt.Sprintf(failFormat, pkg, test), owner))
}
}
diff --git a/test/new-e2e/system-probe/test-json-review/main_test.go b/test/new-e2e/system-probe/test-json-review/main_test.go
index e3d32a2a1478a..db6d09a8b110f 100644
--- a/test/new-e2e/system-probe/test-json-review/main_test.go
+++ b/test/new-e2e/system-probe/test-json-review/main_test.go
@@ -16,6 +16,14 @@ import (
"github.com/stretchr/testify/require"
)
+const (
+ // Add newlines to the end of the format strings to match the expected output, which
+ // is added by addOwnerInformation
+ flakyFormatTest = flakyFormat + "\n"
+ failFormatTest = failFormat + "\n"
+ rerunFormatTest = rerunFormat + "\n"
+)
+
const flakeTestData = `{"Time":"2024-06-14T22:24:53.156240262Z","Action":"run","Package":"a/b/c","Test":"testname"}
{"Time":"2024-06-14T22:24:53.156263319Z","Action":"output","Package":"a/b/c","Test":"testname","Output":"=== RUN testname\n"}
{"Time":"2024-06-14T22:24:53.156271614Z","Action":"output","Package":"a/b/c","Test":"testname","Output":" file_test.go:10: flakytest: this is a known flaky test\n"}
@@ -50,19 +58,29 @@ const flakyFailWithParentFail = `{"Time":"2024-06-14T22:24:52.156240262Z","Actio
{"Time":"2024-06-14T22:26:02.039003529Z","Action":"fail","Package":"a/b/c","Test":"testparent/testname","Elapsed":26.25}
{"Time":"2024-06-14T22:26:03.039003529Z","Action":"fail","Package":"a/b/c","Test":"testparent","Elapsed":28.25}
`
+const parentWithFlakyAndNormalFail = `{"Time":"2024-06-14T22:24:52.156240262Z","Action":"run","Package":"a/b/c","Test":"testparent"}
+{"Time":"2024-06-14T22:24:53.156240262Z","Action":"run","Package":"a/b/c","Test":"testparent/testname"}
+{"Time":"2024-06-14T22:24:53.156263319Z","Action":"output","Package":"a/b/c","Test":"testparent/testname","Output":"=== RUN testparent/testname\n"}
+{"Time":"2024-06-14T22:24:53.156271614Z","Action":"output","Package":"a/b/c","Test":"testparent/testname","Output":" file_test.go:10: flakytest: this is a known flaky test\n"}
+{"Time":"2024-06-14T22:24:54.039003529Z","Action":"fail","Package":"a/b/c","Test":"testparent/testname","Elapsed":26.25}
+{"Time":"2024-06-14T22:24:55.156240262Z","Action":"run","Package":"a/b/c","Test":"testparent/testname2"}
+{"Time":"2024-06-14T22:24:55.156263319Z","Action":"output","Package":"a/b/c","Test":"testparent/testname2","Output":"=== RUN testparent/testname2\n"}
+{"Time":"2024-06-14T22:26:02.039003529Z","Action":"fail","Package":"a/b/c","Test":"testparent/testname2","Elapsed":26.25}
+{"Time":"2024-06-14T22:26:03.039003529Z","Action":"fail","Package":"a/b/c","Test":"testparent","Elapsed":28.25}
+`
func TestFlakeInOutput(t *testing.T) {
out, err := reviewTestsReaders(bytes.NewBuffer([]byte(flakeTestData)), nil, nil)
require.NoError(t, err)
assert.Empty(t, out.Failed)
- assert.Equal(t, fmt.Sprintf(flakyFormat, "a/b/c", "testname"), out.Flaky)
+ assert.Equal(t, fmt.Sprintf(flakyFormatTest, "a/b/c", "testname"), out.Flaky)
assert.Empty(t, out.ReRuns)
}
func TestFailedInOutput(t *testing.T) {
out, err := reviewTestsReaders(bytes.NewBuffer([]byte(failedTestData)), nil, nil)
require.NoError(t, err)
- assert.Equal(t, fmt.Sprintf(failFormat, "a/b/c", "testname"), out.Failed)
+ assert.Equal(t, fmt.Sprintf(failFormatTest, "a/b/c", "testname"), out.Failed)
assert.Empty(t, out.Flaky)
assert.Empty(t, out.ReRuns)
}
@@ -72,13 +90,13 @@ func TestRerunInOutput(t *testing.T) {
require.NoError(t, err)
assert.Empty(t, out.Failed)
assert.Empty(t, out.Flaky)
- assert.Equal(t, fmt.Sprintf(rerunFormat, "a/b/c", "testname", "pass"), out.ReRuns)
+ assert.Equal(t, fmt.Sprintf(rerunFormatTest, "a/b/c", "testname", "pass"), out.ReRuns)
}
func TestOnlyParentOfFlakeFailed(t *testing.T) {
out, err := reviewTestsReaders(bytes.NewBuffer([]byte(onlyParentOfFlakeFailed)), nil, nil)
require.NoError(t, err)
- assert.Equal(t, fmt.Sprintf(failFormat, "a/b/c", "testparent"), out.Failed)
+ assert.Equal(t, fmt.Sprintf(failFormatTest, "a/b/c", "testparent"), out.Failed)
assert.Empty(t, out.Flaky)
assert.Empty(t, out.ReRuns)
}
@@ -87,10 +105,24 @@ func TestParentOfFlakeIsFlake(t *testing.T) {
out, err := reviewTestsReaders(bytes.NewBuffer([]byte(flakyFailWithParentFail)), nil, nil)
require.NoError(t, err)
- flakyParent := fmt.Sprintf(flakyFormat, "a/b/c", "testparent")
- flakyChild := fmt.Sprintf(flakyFormat, "a/b/c", "testparent/testname")
+ flakyParent := fmt.Sprintf(flakyFormatTest, "a/b/c", "testparent")
+ flakyChild := fmt.Sprintf(flakyFormatTest, "a/b/c", "testparent/testname")
assert.Empty(t, out.Failed)
- assert.Equal(t, fmt.Sprintf("%s\n%s\n", flakyParent, flakyChild), out.Flaky)
+ assert.Equal(t, flakyParent+flakyChild, out.Flaky)
+ assert.Empty(t, out.ReRuns)
+}
+
+func TestParentOfFlakeAndFailIsFailed(t *testing.T) {
+ out, err := reviewTestsReaders(bytes.NewBuffer([]byte(parentWithFlakyAndNormalFail)), nil, nil)
+ require.NoError(t, err)
+
+ flakyChild := fmt.Sprintf(flakyFormatTest, "a/b/c", "testparent/testname")
+ failChild := fmt.Sprintf(failFormatTest, "a/b/c", "testparent/testname2")
+ failParent := fmt.Sprintf(failFormatTest, "a/b/c", "testparent")
+ failStr := failParent + failChild
+
+ assert.Equal(t, failStr, out.Failed)
+ assert.Equal(t, flakyChild, out.Flaky)
assert.Empty(t, out.ReRuns)
}
diff --git a/test/new-e2e/system-probe/test-json-review/testowners.go b/test/new-e2e/system-probe/test-json-review/testowners.go
index 5677efe029ce0..0ec0165d55488 100644
--- a/test/new-e2e/system-probe/test-json-review/testowners.go
+++ b/test/new-e2e/system-probe/test-json-review/testowners.go
@@ -8,7 +8,7 @@
package main
import (
- "debug/elf"
+ "debug/elf" //nolint:depguard
"debug/gosym"
"fmt"
"io"
diff --git a/test/new-e2e/tests/agent-metrics-logs/kindfilelogging/kind.go b/test/new-e2e/tests/agent-metrics-logs/kindfilelogging/kind.go
index 7e516169873ca..d66fa4f20746d 100644
--- a/test/new-e2e/tests/agent-metrics-logs/kindfilelogging/kind.go
+++ b/test/new-e2e/tests/agent-metrics-logs/kindfilelogging/kind.go
@@ -147,7 +147,7 @@ func KindRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Prov
return fmt.Errorf("ec2.InstallECRCredentialsHelper %w", err)
}
- kindCluster, err := kubeComp.NewKindCluster(&awsEnv, host, awsEnv.CommonNamer().ResourceName("kind"), params.name, awsEnv.KubernetesVersion(), utils.PulumiDependsOn(installEcrCredsHelperCmd))
+ kindCluster, err := kubeComp.NewKindCluster(&awsEnv, host, params.name, awsEnv.KubernetesVersion(), utils.PulumiDependsOn(installEcrCredsHelperCmd))
if err != nil {
return fmt.Errorf("kubeComp.NewKindCluster: %w", err)
}
diff --git a/test/new-e2e/tests/containers/k8s_test.go b/test/new-e2e/tests/containers/k8s_test.go
index ce0a53578e40c..3629b348da837 100644
--- a/test/new-e2e/tests/containers/k8s_test.go
+++ b/test/new-e2e/tests/containers/k8s_test.go
@@ -197,17 +197,15 @@ func (suite *k8sSuite) TestAdmissionControllerWebhooksExist() {
expectedWebhookName := "datadog-webhook"
suite.Run("agent registered mutating webhook configuration", func() {
- mutatingConfigs, err := suite.K8sClient.AdmissionregistrationV1().MutatingWebhookConfigurations().List(ctx, metav1.ListOptions{})
+ mutatingConfig, err := suite.K8sClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, expectedWebhookName, metav1.GetOptions{})
suite.Require().NoError(err)
- suite.NotEmpty(mutatingConfigs.Items, "No mutating webhook configuration found")
- found := false
- for _, mutatingConfig := range mutatingConfigs.Items {
- if mutatingConfig.Name == expectedWebhookName {
- found = true
- break
- }
- }
- suite.Require().True(found, fmt.Sprintf("None of the mutating webhook configurations have the name '%s'", expectedWebhookName))
+ suite.NotNilf(mutatingConfig, "None of the mutating webhook configurations have the name '%s'", expectedWebhookName)
+ })
+
+ suite.Run("agent registered validating webhook configuration", func() {
+ validatingConfig, err := suite.K8sClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(ctx, expectedWebhookName, metav1.GetOptions{})
+ suite.Require().NoError(err)
+ suite.NotNilf(validatingConfig, "None of the validating webhook configurations have the name '%s'", expectedWebhookName)
})
}
diff --git a/test/new-e2e/tests/gpu/gpu_test.go b/test/new-e2e/tests/gpu/gpu_test.go
index 719e2f579b944..ca886450572c8 100644
--- a/test/new-e2e/tests/gpu/gpu_test.go
+++ b/test/new-e2e/tests/gpu/gpu_test.go
@@ -9,6 +9,7 @@ import (
"encoding/json"
"flag"
"fmt"
+ "slices"
"strings"
"testing"
"time"
@@ -160,6 +161,12 @@ func (v *gpuSuite) TestVectorAddProgramDetected() {
metrics, err := v.Env().FakeIntake.Client().FilterMetrics(metricName, client.WithMetricValueHigherThan(0))
assert.NoError(c, err)
assert.Greater(c, len(metrics), 0, "no '%s' with value higher than 0 yet", metricName)
+
+ for _, metric := range metrics {
+ assert.True(c, slices.ContainsFunc(metric.Tags, func(tag string) bool {
+ return strings.HasPrefix(tag, "gpu_uuid:")
+ }), "no gpu_uuid tag found in %v", metric)
+ }
}
}, 5*time.Minute, 10*time.Second)
}
diff --git a/test/new-e2e/tests/installer/unix/package_apm_inject_test.go b/test/new-e2e/tests/installer/unix/package_apm_inject_test.go
index 926a6210e00ef..21ad610c25b45 100644
--- a/test/new-e2e/tests/installer/unix/package_apm_inject_test.go
+++ b/test/new-e2e/tests/installer/unix/package_apm_inject_test.go
@@ -167,6 +167,9 @@ func (s *packageApmInjectSuite) TestUpgrade_InjectorDeb_To_InjectorOCI() {
"TESTING_YUM_VERSION_PATH=",
"DD_REPO_URL=datadoghq.com",
)
+ s.host.Run("sudo apt-get install -y datadog-apm-inject datadog-apm-library-python || sudo yum install -y datadog-apm-inject datadog-apm-library-python")
+ s.host.Run("sudo dd-container-install --no-agent-restart")
+ s.host.Run("sudo dd-host-install --no-agent-restart")
defer s.Purge()
defer s.purgeInjectorDebInstall()
@@ -217,6 +220,7 @@ func (s *packageApmInjectSuite) TestUpgrade_InjectorOCI_To_InjectorDeb() {
"TESTING_YUM_VERSION_PATH=",
"DD_REPO_URL=datadoghq.com",
)
+ s.host.Run("sudo apt-get install -y datadog-apm-inject datadog-apm-library-python || sudo yum install -y datadog-apm-inject datadog-apm-library-python")
defer s.purgeInjectorDebInstall()
// OCI mustn't be overridden
@@ -360,6 +364,7 @@ func (s *packageApmInjectSuite) TestInstrumentScripts() {
"TESTING_YUM_VERSION_PATH=",
"DD_REPO_URL=datadoghq.com",
)
+ s.host.Run("sudo apt-get install -y datadog-apm-inject datadog-apm-library-python || sudo yum install -y datadog-apm-inject datadog-apm-library-python")
defer s.Purge()
defer s.purgeInjectorDebInstall()
@@ -531,10 +536,6 @@ func (s *packageApmInjectSuite) purgeInjectorDebInstall() {
packageList := []string{
"datadog-agent",
"datadog-apm-inject",
- "datadog-apm-library-java",
- "datadog-apm-library-ruby",
- "datadog-apm-library-js",
- "datadog-apm-library-dotnet",
"datadog-apm-library-python",
}
s.Env().RemoteHost.Execute(fmt.Sprintf("sudo apt-get remove -y --purge %[1]s || sudo yum remove -y %[1]s", strings.Join(packageList, " ")))
diff --git a/test/new-e2e/tests/installer/unix/upgrade_scenario_test.go b/test/new-e2e/tests/installer/unix/upgrade_scenario_test.go
index d8811f7c81a2e..f3f9e11843a7f 100644
--- a/test/new-e2e/tests/installer/unix/upgrade_scenario_test.go
+++ b/test/new-e2e/tests/installer/unix/upgrade_scenario_test.go
@@ -10,7 +10,6 @@ import (
"fmt"
"time"
- "github.com/DataDog/datadog-agent/pkg/util/testutil/flake"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client"
"github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/host"
e2eos "github.com/DataDog/test-infra-definitions/components/os"
@@ -445,7 +444,6 @@ func (s *upgradeScenarioSuite) TestUpgradeConfigFromExistingExperiment() {
}
func (s *upgradeScenarioSuite) TestUpgradeConfigFailure() {
- flake.Mark(s.T()) // TODO(baptiste): Remove & fix
localCDN := host.NewLocalCDN(s.host)
localCDN.AddLayer("config", "\"log_level\": \"debug\"")
s.RunInstallScript(
@@ -497,6 +495,7 @@ func (s *upgradeScenarioSuite) TestUpgradeConfigFailure() {
}
func (s *upgradeScenarioSuite) startExperiment(pkg packageName, version string) (string, error) {
+ s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock")
cmd := fmt.Sprintf("sudo datadog-installer daemon start-experiment %s %s > /tmp/start_experiment.log 2>&1", pkg, version)
s.T().Logf("Running start command: %s", cmd)
return s.Env().RemoteHost.Execute(cmd)
@@ -513,6 +512,7 @@ func (s *upgradeScenarioSuite) mustStartExperiment(pkg packageName, version stri
}
func (s *upgradeScenarioSuite) promoteExperiment(pkg packageName) (string, error) {
+ s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock")
cmd := fmt.Sprintf("sudo datadog-installer daemon promote-experiment %s > /tmp/promote_experiment.log 2>&1", pkg)
s.T().Logf("Running promote command: %s", cmd)
return s.Env().RemoteHost.Execute(cmd)
@@ -529,6 +529,7 @@ func (s *upgradeScenarioSuite) mustPromoteExperiment(pkg packageName) string {
}
func (s *upgradeScenarioSuite) stopExperiment(pkg packageName) (string, error) {
+ s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock")
cmd := fmt.Sprintf("sudo datadog-installer daemon stop-experiment %s > /tmp/stop_experiment.log 2>&1", pkg)
s.T().Logf("Running stop command: %s", cmd)
return s.Env().RemoteHost.Execute(cmd)
@@ -624,6 +625,7 @@ func (s *upgradeScenarioSuite) assertSuccessfulAgentStopExperiment(timestamp hos
}
func (s *upgradeScenarioSuite) startConfigExperiment(localCDNPath string, pkg packageName, hash string) (string, error) {
+ s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock")
cmd := fmt.Sprintf("sudo -E datadog-installer install-config-experiment %s %s > /tmp/start_config_experiment.log 2>&1", pkg, hash)
s.T().Logf("Running start command: %s", cmd)
return s.Env().RemoteHost.Execute(cmd, client.WithEnvVariables(map[string]string{"DD_INSTALLER_DEBUG_CDN_LOCAL_DIR_PATH": localCDNPath, "DD_REMOTE_POLICIES": "true"}))
@@ -640,6 +642,7 @@ func (s *upgradeScenarioSuite) mustStartConfigExperiment(localCDNPath string, pk
}
func (s *upgradeScenarioSuite) promoteConfigExperiment(localCDNPath string, pkg packageName) (string, error) {
+ s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock")
cmd := fmt.Sprintf("sudo -E datadog-installer promote-config-experiment %s > /tmp/promote_config_experiment.log 2>&1", pkg)
s.T().Logf("Running promote command: %s", cmd)
return s.Env().RemoteHost.Execute(cmd, client.WithEnvVariables(map[string]string{"DD_INSTALLER_DEBUG_CDN_LOCAL_DIR_PATH": localCDNPath, "DD_REMOTE_POLICIES": "true"}))
@@ -656,6 +659,7 @@ func (s *upgradeScenarioSuite) mustPromoteConfigExperiment(localCDNPath string,
}
func (s *upgradeScenarioSuite) stopConfigExperiment(localCDNPath string, pkg packageName) (string, error) {
+ s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock")
cmd := fmt.Sprintf("sudo -E datadog-installer remove-config-experiment %s > /tmp/stop_config_experiment.log 2>&1", pkg)
s.T().Logf("Running stop command: %s", cmd)
return s.Env().RemoteHost.Execute(cmd, client.WithEnvVariables(map[string]string{"DD_INSTALLER_DEBUG_CDN_LOCAL_DIR_PATH": localCDNPath, "DD_REMOTE_POLICIES": "true"}))
diff --git a/test/new-e2e/tests/orchestrator/apply.go b/test/new-e2e/tests/orchestrator/apply.go
index 1f1f90dc1dbd2..f7300ddc78833 100644
--- a/test/new-e2e/tests/orchestrator/apply.go
+++ b/test/new-e2e/tests/orchestrator/apply.go
@@ -67,7 +67,7 @@ func createCluster(ctx *pulumi.Context) (*resAws.Environment, *localKubernetes.C
return nil, nil, nil, err
}
- kindCluster, err := localKubernetes.NewKindCluster(&awsEnv, vm, awsEnv.CommonNamer().ResourceName("kind"), "kind", awsEnv.KubernetesVersion(), utils.PulumiDependsOn(installEcrCredsHelperCmd))
+ kindCluster, err := localKubernetes.NewKindCluster(&awsEnv, vm, "kind", awsEnv.KubernetesVersion(), utils.PulumiDependsOn(installEcrCredsHelperCmd))
if err != nil {
return nil, nil, nil, err
}
diff --git a/test/new-e2e/tests/orchestrator/k8s_test.go b/test/new-e2e/tests/orchestrator/k8s_test.go
index b17ccfbb2cbd4..fdbe0b9320206 100644
--- a/test/new-e2e/tests/orchestrator/k8s_test.go
+++ b/test/new-e2e/tests/orchestrator/k8s_test.go
@@ -6,10 +6,12 @@
package orchestrator
import (
+ "fmt"
"strings"
"time"
agentmodel "github.com/DataDog/agent-payload/v5/process"
+
"github.com/DataDog/datadog-agent/test/fakeintake/aggregator"
fakeintake "github.com/DataDog/datadog-agent/test/fakeintake/client"
)
@@ -32,7 +34,7 @@ func (suite *k8sSuite) TestNode() {
expectAtLeastOneResource{
filter: &fakeintake.PayloadFilter{ResourceType: agentmodel.TypeCollectorNode},
test: func(payload *aggregator.OrchestratorPayload) bool {
- return payload.Node.Metadata.Name == "kind-control-plane"
+ return payload.Node.Metadata.Name == fmt.Sprintf("%s-control-plane", suite.KubeClusterName)
},
message: "find a control plane node",
timeout: defaultTimeout,
diff --git a/test/new-e2e/tests/otel/otel-agent/complete_test.go b/test/new-e2e/tests/otel/otel-agent/complete_test.go
index 9065dd0a095f4..357a6e754cee6 100644
--- a/test/new-e2e/tests/otel/otel-agent/complete_test.go
+++ b/test/new-e2e/tests/otel/otel-agent/complete_test.go
@@ -44,7 +44,7 @@ agents:
func (s *completeTestSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
- utils.TestCalendarApp(s)
+ utils.TestCalendarApp(s, false)
}
func (s *completeTestSuite) TestOTLPTraces() {
diff --git a/test/new-e2e/tests/otel/otel-agent/infraattributes_eks_test.go b/test/new-e2e/tests/otel/otel-agent/infraattributes_eks_test.go
index f151b745ea624..4bbfc390a861b 100644
--- a/test/new-e2e/tests/otel/otel-agent/infraattributes_eks_test.go
+++ b/test/new-e2e/tests/otel/otel-agent/infraattributes_eks_test.go
@@ -43,7 +43,7 @@ var eksParams = utils.IAParams{
func (s *iaEKSTestSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
- utils.TestCalendarApp(s)
+ utils.TestCalendarApp(s, false)
}
func (s *iaEKSTestSuite) TestOTLPTraces() {
@@ -65,3 +65,35 @@ func (s *iaEKSTestSuite) TestHosts() {
func (s *iaEKSTestSuite) TestPrometheusMetrics() {
utils.TestPrometheusMetrics(s)
}
+
+type iaUSTEKSTestSuite struct {
+ e2e.BaseSuite[environments.Kubernetes]
+}
+
+func TestOTelAgentIAUSTEKS(t *testing.T) {
+ values := `
+datadog:
+ logs:
+ containerCollectAll: false
+ containerCollectUsingFiles: false
+`
+ t.Parallel()
+ e2e.Run(t, &iaUSTEKSTestSuite{}, e2e.WithProvisioner(awskubernetes.EKSProvisioner(awskubernetes.WithEKSOptions(eks.WithLinuxNodeGroup()), awskubernetes.WithAgentOptions(kubernetesagentparams.WithoutDualShipping(), kubernetesagentparams.WithHelmValues(values), kubernetesagentparams.WithOTelAgent(), kubernetesagentparams.WithOTelConfig(iaConfig)))))
+}
+
+func (s *iaUSTEKSTestSuite) SetupSuite() {
+ s.BaseSuite.SetupSuite()
+ utils.TestCalendarApp(s, true)
+}
+
+func (s *iaUSTEKSTestSuite) TestOTLPTraces() {
+ utils.TestTraces(s, eksParams)
+}
+
+func (s *iaUSTEKSTestSuite) TestOTLPMetrics() {
+ utils.TestMetrics(s, eksParams)
+}
+
+func (s *iaUSTEKSTestSuite) TestOTLPLogs() {
+ utils.TestLogs(s, eksParams)
+}
diff --git a/test/new-e2e/tests/otel/otel-agent/infraattributes_test.go b/test/new-e2e/tests/otel/otel-agent/infraattributes_test.go
index 6825d6a57ee2b..cf330ff1ba7ea 100644
--- a/test/new-e2e/tests/otel/otel-agent/infraattributes_test.go
+++ b/test/new-e2e/tests/otel/otel-agent/infraattributes_test.go
@@ -45,7 +45,7 @@ var iaParams = utils.IAParams{
func (s *iaTestSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
- utils.TestCalendarApp(s)
+ utils.TestCalendarApp(s, false)
}
func (s *iaTestSuite) TestOTLPTraces() {
diff --git a/test/new-e2e/tests/otel/otel-agent/minimal_test.go b/test/new-e2e/tests/otel/otel-agent/minimal_test.go
index 33ad5de219edd..47c0076ffce3a 100644
--- a/test/new-e2e/tests/otel/otel-agent/minimal_test.go
+++ b/test/new-e2e/tests/otel/otel-agent/minimal_test.go
@@ -54,7 +54,7 @@ var minimalParams = utils.IAParams{
func (s *minimalTestSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
- utils.TestCalendarApp(s)
+ utils.TestCalendarApp(s, false)
}
func (s *minimalTestSuite) TestOTLPTraces() {
diff --git a/test/new-e2e/tests/otel/otel-agent/testdata/minimal-full-config.yml b/test/new-e2e/tests/otel/otel-agent/testdata/minimal-full-config.yml
index d2ec1d87f5981..72ebf6d0f7c59 100644
--- a/test/new-e2e/tests/otel/otel-agent/testdata/minimal-full-config.yml
+++ b/test/new-e2e/tests/otel/otel-agent/testdata/minimal-full-config.yml
@@ -16,7 +16,7 @@ exporters:
datadog:
api:
fail_on_invalid_key: false
- key: '[REDACTED]'
+ key: "[REDACTED]"
site: datadoghq.com
auth: null
compression: ""
@@ -79,15 +79,15 @@ exporters:
timeout: 15s
tls:
ca_file: ""
- ca_pem: '[REDACTED]'
+ ca_pem: "[REDACTED]"
cert_file: ""
- cert_pem: '[REDACTED]'
+ cert_pem: "[REDACTED]"
cipher_suites: []
include_system_ca_certs_pool: false
insecure: false
insecure_skip_verify: false
key_file: ""
- key_pem: '[REDACTED]'
+ key_pem: "[REDACTED]"
max_version: ""
min_version: ""
reload_interval: 0s
@@ -207,30 +207,30 @@ receivers:
evaluation_interval: 1m
scrape_interval: 1m
scrape_protocols:
- - OpenMetricsText1.0.0
- - OpenMetricsText0.0.1
- - PrometheusText0.0.4
+ - OpenMetricsText1.0.0
+ - OpenMetricsText0.0.1
+ - PrometheusText0.0.4
scrape_timeout: 10s
runtime:
gogc: 75
scrape_configs:
- - enable_compression: true
- enable_http2: true
- follow_redirects: true
- honor_timestamps: true
- job_name: datadog-agent
- metrics_path: /metrics
- scheme: http
- scrape_interval: 10s
- scrape_protocols:
- - OpenMetricsText1.0.0
- - OpenMetricsText0.0.1
- - PrometheusText0.0.4
- scrape_timeout: 10s
- static_configs:
- - targets:
- - 0.0.0.0:8888
- track_timestamps_staleness: false
+ - enable_compression: true
+ enable_http2: true
+ follow_redirects: true
+ honor_timestamps: true
+ job_name: datadog-agent
+ metrics_path: /metrics
+ scheme: http
+ scrape_interval: 10s
+ scrape_protocols:
+ - OpenMetricsText1.0.0
+ - OpenMetricsText0.0.1
+ - PrometheusText0.0.4
+ scrape_timeout: 10s
+ static_configs:
+ - targets:
+ - 0.0.0.0:8888
+ track_timestamps_staleness: false
report_extra_scrape_metrics: false
start_time_metric_regex: ""
target_allocator: null
@@ -238,50 +238,50 @@ receivers:
use_start_time_metric: false
service:
extensions:
- - pprof/dd-autoconfigured
- - zpages/dd-autoconfigured
- - health_check/dd-autoconfigured
- - ddflare/dd-autoconfigured
+ - pprof/dd-autoconfigured
+ - zpages/dd-autoconfigured
+ - health_check/dd-autoconfigured
+ - ddflare/dd-autoconfigured
pipelines:
logs:
exporters:
- - datadog
+ - datadog
processors:
- - batch
- - infraattributes/dd-autoconfigured
+ - batch
+ - infraattributes/dd-autoconfigured
receivers:
- - otlp
+ - otlp
metrics:
exporters:
- - datadog
+ - datadog
processors:
- - batch
- - infraattributes/dd-autoconfigured
+ - batch
+ - infraattributes/dd-autoconfigured
receivers:
- - otlp
- - datadog/connector
+ - otlp
+ - datadog/connector
metrics/dd-autoconfigured/datadog:
exporters:
- - datadog
+ - datadog
processors: []
receivers:
- - prometheus/dd-autoconfigured
+ - prometheus/dd-autoconfigured
traces:
exporters:
- - datadog/connector
+ - datadog/connector
processors:
- - batch
- - infraattributes/dd-autoconfigured
+ - batch
+ - infraattributes/dd-autoconfigured
receivers:
- - otlp
+ - otlp
traces/send:
exporters:
- - datadog
+ - datadog
processors:
- - batch
- - infraattributes/dd-autoconfigured
+ - batch
+ - infraattributes/dd-autoconfigured
receivers:
- - otlp
+ - otlp
telemetry:
logs:
development: false
@@ -289,11 +289,12 @@ service:
disable_stacktrace: false
encoding: console
error_output_paths:
- - stderr
+ - stderr
initial_fields: {}
level: info
output_paths:
- - stderr
+ - stderr
+ processors: []
sampling:
enabled: true
initial: 10
diff --git a/test/new-e2e/tests/otel/otel-agent/testdata/minimal-provided-config.yml b/test/new-e2e/tests/otel/otel-agent/testdata/minimal-provided-config.yml
index e1575f67db452..6a20e99ee33ed 100644
--- a/test/new-e2e/tests/otel/otel-agent/testdata/minimal-provided-config.yml
+++ b/test/new-e2e/tests/otel/otel-agent/testdata/minimal-provided-config.yml
@@ -16,7 +16,7 @@ exporters:
datadog:
api:
fail_on_invalid_key: false
- key: '[REDACTED]'
+ key: "[REDACTED]"
site: datadoghq.com
auth: null
compression: ""
@@ -79,15 +79,15 @@ exporters:
timeout: 15s
tls:
ca_file: ""
- ca_pem: '[REDACTED]'
+ ca_pem: "[REDACTED]"
cert_file: ""
- cert_pem: '[REDACTED]'
+ cert_pem: "[REDACTED]"
cipher_suites: []
include_system_ca_certs_pool: false
insecure: false
insecure_skip_verify: false
key_file: ""
- key_pem: '[REDACTED]'
+ key_pem: "[REDACTED]"
max_version: ""
min_version: ""
reload_interval: 0s
@@ -146,33 +146,33 @@ service:
pipelines:
logs:
exporters:
- - datadog
+ - datadog
processors:
- - batch
+ - batch
receivers:
- - otlp
+ - otlp
metrics:
exporters:
- - datadog
+ - datadog
processors:
- - batch
+ - batch
receivers:
- - otlp
- - datadog/connector
+ - otlp
+ - datadog/connector
traces:
exporters:
- - datadog/connector
+ - datadog/connector
processors:
- - batch
+ - batch
receivers:
- - otlp
+ - otlp
traces/send:
exporters:
- - datadog
+ - datadog
processors:
- - batch
+ - batch
receivers:
- - otlp
+ - otlp
telemetry:
logs:
development: false
@@ -180,11 +180,12 @@ service:
disable_stacktrace: false
encoding: console
error_output_paths:
- - stderr
+ - stderr
initial_fields: {}
level: info
output_paths:
- - stderr
+ - stderr
+ processors: []
sampling:
enabled: true
initial: 10
diff --git a/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go b/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go
index ff3621d2a31da..b91361b7e9866 100644
--- a/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go
+++ b/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go
@@ -52,7 +52,7 @@ var otlpIngestParams = utils.IAParams{
func (s *otlpIngestTestSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
- utils.TestCalendarApp(s)
+ utils.TestCalendarApp(s, false)
}
func (s *otlpIngestTestSuite) TestOTLPTraces() {
diff --git a/test/new-e2e/tests/otel/utils/pipelines_utils.go b/test/new-e2e/tests/otel/utils/pipelines_utils.go
index 755df612cb54d..e955e394f9da8 100644
--- a/test/new-e2e/tests/otel/utils/pipelines_utils.go
+++ b/test/new-e2e/tests/otel/utils/pipelines_utils.go
@@ -457,13 +457,13 @@ func createTelemetrygenJob(ctx context.Context, s OTelTestSuite, telemetry strin
}
// TestCalendarApp tests that OTLP metrics are received through OTel pipelines as expected
-func TestCalendarApp(s OTelTestSuite) {
+func TestCalendarApp(s OTelTestSuite, ust bool) {
ctx := context.Background()
err := s.Env().FakeIntake.Client().FlushServerAndResetAggregators()
require.NoError(s.T(), err)
s.T().Log("Starting calendar app")
- createCalendarApp(ctx, s)
+ createCalendarApp(ctx, s, ust)
// Wait for calendar app to start
require.EventuallyWithT(s.T(), func(c *assert.CollectT) {
@@ -473,7 +473,7 @@ func TestCalendarApp(s OTelTestSuite) {
}, 30*time.Minute, 10*time.Second)
}
-func createCalendarApp(ctx context.Context, s OTelTestSuite) {
+func createCalendarApp(ctx context.Context, s OTelTestSuite, ust bool) {
var replicas int32 = 1
name := fmt.Sprintf("calendar-rest-go-%v", strings.ReplaceAll(strings.ToLower(s.T().Name()), "/", "-"))
@@ -567,43 +567,7 @@ func createCalendarApp(ctx context.Context, s OTelTestSuite) {
},
},
},
- Env: []corev1.EnvVar{{
- Name: "OTEL_SERVICE_NAME",
- Value: calendarService,
- }, {
- Name: "OTEL_CONTAINER_NAME",
- Value: name,
- }, {
- Name: "OTEL_K8S_NAMESPACE",
- ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}},
- }, {
- Name: "OTEL_K8S_NODE_NAME",
- ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"}},
- }, {
- Name: "OTEL_K8S_POD_NAME",
- ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}},
- }, {
- Name: "OTEL_K8S_POD_ID",
- ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.uid"}},
- }, {
- Name: "OTEL_EXPORTER_OTLP_ENDPOINT",
- Value: otlpEndpoint,
- }, {
- Name: "OTEL_EXPORTER_OTLP_PROTOCOL",
- Value: "grpc",
- }, {
- Name: "OTEL_RESOURCE_ATTRIBUTES",
- Value: "service.name=$(OTEL_SERVICE_NAME)," +
- "k8s.namespace.name=$(OTEL_K8S_NAMESPACE)," +
- "k8s.node.name=$(OTEL_K8S_NODE_NAME)," +
- "k8s.pod.name=$(OTEL_K8S_POD_NAME)," +
- "k8s.pod.uid=$(OTEL_K8S_POD_ID)," +
- "k8s.container.name=$(OTEL_CONTAINER_NAME)," +
- "host.name=$(OTEL_K8S_NODE_NAME)," +
- fmt.Sprintf("deployment.environment=%v,", env) +
- fmt.Sprintf("service.version=%v,", version) +
- fmt.Sprintf("%v=%v", customAttribute, customAttributeValue),
- }},
+ Env: getCalendarAppEnvVars(name, otlpEndpoint, ust),
},
},
},
@@ -617,6 +581,65 @@ func createCalendarApp(ctx context.Context, s OTelTestSuite) {
require.NoError(s.T(), err, "Could not properly start deployment")
}
+func getCalendarAppEnvVars(name string, otlpEndpoint string, ust bool) []corev1.EnvVar {
+ envVars := []corev1.EnvVar{{
+ Name: "OTEL_CONTAINER_NAME",
+ Value: name,
+ }, {
+ Name: "OTEL_K8S_NAMESPACE",
+ ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}},
+ }, {
+ Name: "OTEL_K8S_NODE_NAME",
+ ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"}},
+ }, {
+ Name: "OTEL_K8S_POD_NAME",
+ ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}},
+ }, {
+ Name: "OTEL_K8S_POD_ID",
+ ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.uid"}},
+ }, {
+ Name: "OTEL_EXPORTER_OTLP_ENDPOINT",
+ Value: otlpEndpoint,
+ }, {
+ Name: "OTEL_EXPORTER_OTLP_PROTOCOL",
+ Value: "grpc",
+ }}
+ resourceAttrs := "k8s.namespace.name=$(OTEL_K8S_NAMESPACE)," +
+ "k8s.node.name=$(OTEL_K8S_NODE_NAME)," +
+ "k8s.pod.name=$(OTEL_K8S_POD_NAME)," +
+ "k8s.pod.uid=$(OTEL_K8S_POD_ID)," +
+ "k8s.container.name=$(OTEL_CONTAINER_NAME)," +
+ "host.name=$(OTEL_K8S_NODE_NAME)," +
+ fmt.Sprintf("%v=%v", customAttribute, customAttributeValue)
+
+ // Use Unified Service Tagging env vars instead of OTel env vars
+ if ust {
+ return append(envVars, []corev1.EnvVar{{
+ Name: "DD_SERVICE",
+ Value: calendarService,
+ }, {
+ Name: "DD_ENV",
+ Value: env,
+ }, {
+ Name: "DD_VERSION",
+ Value: version,
+ }, {
+ Name: "OTEL_RESOURCE_ATTRIBUTES",
+ Value: resourceAttrs,
+ }}...)
+ }
+
+ return append(envVars, []corev1.EnvVar{{
+ Name: "OTEL_SERVICE_NAME",
+ Value: calendarService,
+ }, {
+ Name: "OTEL_RESOURCE_ATTRIBUTES",
+ Value: resourceAttrs +
+ fmt.Sprintf(",deployment.environment=%v,", env) +
+ fmt.Sprintf("service.version=%v", version),
+ }}...)
+}
+
func testInfraTags(t *testing.T, tags map[string]string, iaParams IAParams) {
assert.NotNil(t, tags["kube_deployment"])
assert.NotNil(t, tags["kube_qos"])
diff --git a/test/new-e2e/tests/security-agent-functional/security_agent_test.go b/test/new-e2e/tests/security-agent-functional/security_agent_test.go
index 545f11529f00e..1b693e1b43b21 100644
--- a/test/new-e2e/tests/security-agent-functional/security_agent_test.go
+++ b/test/new-e2e/tests/security-agent-functional/security_agent_test.go
@@ -36,6 +36,7 @@ var (
func TestVMSuite(t *testing.T) {
flake.Mark(t)
+
suiteParams := []e2e.SuiteOption{e2e.WithProvisioner(awshost.ProvisionerNoAgentNoFakeIntake(awshost.WithEC2InstanceOptions(ec2.WithOS(componentsos.WindowsDefault))))}
if *devMode {
suiteParams = append(suiteParams, e2e.WithDevMode())
diff --git a/test/otel/go.mod b/test/otel/go.mod
index 74eae7bb38d3f..4dcd299747f4b 100644
--- a/test/otel/go.mod
+++ b/test/otel/go.mod
@@ -88,6 +88,7 @@ replace (
github.com/DataDog/datadog-agent/pkg/util/system => ./../../pkg/util/system
github.com/DataDog/datadog-agent/pkg/util/system/socket => ./../../pkg/util/system/socket
github.com/DataDog/datadog-agent/pkg/util/testutil => ./../../pkg/util/testutil
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ./../../pkg/util/utilizationtracker
github.com/DataDog/datadog-agent/pkg/util/winutil => ./../../pkg/util/winutil
github.com/DataDog/datadog-agent/pkg/version => ./../../pkg/version
)
@@ -108,6 +109,12 @@ require (
github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3
)
+require (
+ github.com/pierrec/lz4/v4 v4.1.21 // indirect
+ go.opentelemetry.io/collector/consumer/consumererror v0.113.0 // indirect
+ go.opentelemetry.io/collector/pdata/pprofile v0.113.0 // indirect
+)
+
require (
github.com/DataDog/agent-payload/v5 v5.0.119 // indirect
github.com/DataDog/datadog-agent/comp/core/flare/builder v0.57.1 // indirect
@@ -174,6 +181,7 @@ require (
github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
github.com/DataDog/datadog-agent/pkg/util/system v0.57.1 // indirect
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.1 // indirect
github.com/DataDog/datadog-agent/pkg/version v0.57.1 // indirect
github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect
@@ -204,11 +212,11 @@ require (
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fatih/color v1.17.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
- github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
@@ -225,7 +233,7 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
github.com/karrick/godirwalk v1.17.0 // indirect
- github.com/klauspost/compress v1.17.10 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
github.com/knadh/koanf/maps v0.1.1 // indirect
github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
github.com/knadh/koanf/v2 v2.1.1 // indirect
@@ -270,29 +278,28 @@ require (
github.com/tklauser/numcpus v0.8.0 // indirect
github.com/twmb/murmur3 v1.1.8 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- go.opentelemetry.io/collector/client v1.17.0 // indirect
- go.opentelemetry.io/collector/component v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configauth v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configcompression v1.17.0 // indirect
- go.opentelemetry.io/collector/config/confighttp v0.111.0 // indirect
- go.opentelemetry.io/collector/config/confignet v0.104.0 // indirect
- go.opentelemetry.io/collector/config/configopaque v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configretry v1.17.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
- go.opentelemetry.io/collector/config/configtls v1.17.0 // indirect
- go.opentelemetry.io/collector/config/internal v0.111.0 // indirect
- go.opentelemetry.io/collector/confmap v1.17.0 // indirect
- go.opentelemetry.io/collector/consumer v0.111.0 // indirect
- go.opentelemetry.io/collector/exporter v0.111.0 // indirect
- go.opentelemetry.io/collector/extension v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/auth v0.111.0 // indirect
- go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect
- go.opentelemetry.io/collector/featuregate v1.11.0 // indirect
- go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect
- go.opentelemetry.io/collector/pdata v1.17.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.111.0 // indirect
- go.opentelemetry.io/collector/semconv v0.111.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect
+ go.opentelemetry.io/collector/client v1.19.0 // indirect
+ go.opentelemetry.io/collector/component v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configauth v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configcompression v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/confighttp v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/confignet v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configopaque v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configretry v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect
+ go.opentelemetry.io/collector/config/configtls v1.19.0 // indirect
+ go.opentelemetry.io/collector/config/internal v0.113.0 // indirect
+ go.opentelemetry.io/collector/confmap v1.19.0 // indirect
+ go.opentelemetry.io/collector/consumer v0.113.0 // indirect
+ go.opentelemetry.io/collector/exporter v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/auth v0.113.0 // indirect
+ go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.19.0 // indirect
+ go.opentelemetry.io/collector/pdata v1.19.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.113.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.113.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect
go.opentelemetry.io/otel v1.31.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
@@ -313,7 +320,7 @@ require (
golang.org/x/time v0.7.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/grpc v1.67.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/test/otel/go.sum b/test/otel/go.sum
index ee08debbb0174..3d185cd732f6b 100644
--- a/test/otel/go.sum
+++ b/test/otel/go.sum
@@ -98,8 +98,8 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -113,8 +113,8 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
-github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -191,8 +191,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
-github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs=
github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
@@ -263,6 +263,8 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -383,72 +385,78 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.opentelemetry.io/collector/client v1.17.0 h1:eJB4r4nPY0WrQ6IQEEbOPCOfQU7N15yzZud9y5fKfms=
-go.opentelemetry.io/collector/client v1.17.0/go.mod h1:egG3tOG68zvC04hgl6cW2H/oWCUCCdDWtL4WpbcSUys=
-go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ=
-go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM=
-go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY=
-go.opentelemetry.io/collector/config/configauth v0.111.0 h1:0CcgX4TzK5iu2YtryIu3al8lNI+9fqjbGoyvAFk9ZCw=
-go.opentelemetry.io/collector/config/configauth v0.111.0/go.mod h1:5oyYNL3gnYMYNdNsEjFvA2Tdc1yjG8L+HQFIjPo6kK8=
-go.opentelemetry.io/collector/config/configcompression v1.17.0 h1:5CzLHTPOgHaKod1ZQLYs0o7GZDBhdsLQRm8Lcbo79vU=
-go.opentelemetry.io/collector/config/configcompression v1.17.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
-go.opentelemetry.io/collector/config/confighttp v0.111.0 h1:nZJFHKYYeCasyhhFC71iZf6GAs6pfFcNOga6b8+lFvc=
-go.opentelemetry.io/collector/config/confighttp v0.111.0/go.mod h1:heE5JjcLDiH8fMULf55QL2oI9+8Ct58Vq/QfP7TV684=
-go.opentelemetry.io/collector/config/confignet v0.104.0 h1:i7AOTJf4EQox3SEt1YtQFQR+BwXr3v5D9x3Ai9/ovy8=
-go.opentelemetry.io/collector/config/confignet v0.104.0/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E=
-go.opentelemetry.io/collector/config/configopaque v1.17.0 h1:wHhUgJhmDgNd6M7GW8IU5HjWi/pNmBEe9jBhavoR45g=
-go.opentelemetry.io/collector/config/configopaque v1.17.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
-go.opentelemetry.io/collector/config/configretry v1.17.0 h1:9GaiNKgUDx5by+A0aHKojw1BilHSK+8wq2LOmnynN00=
-go.opentelemetry.io/collector/config/configretry v1.17.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY=
-go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
-go.opentelemetry.io/collector/config/configtls v1.17.0 h1:5DPgmBgpKEopLGmkjaihZHVA/8yH0LGoOrUZlb86T0Q=
-go.opentelemetry.io/collector/config/configtls v1.17.0/go.mod h1:xUV5/xAHJbwrCuT2rGurBGSUqyFFAVVBcQ5DJAENeCc=
-go.opentelemetry.io/collector/config/internal v0.111.0 h1:HTrN9xCpX42xlyDskWbhA/2NkSjMasxNEuGkmjjq7Q8=
-go.opentelemetry.io/collector/config/internal v0.111.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
-go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM=
-go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k=
-go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI=
-go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s=
-go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U=
-go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4=
-go.opentelemetry.io/collector/exporter v0.111.0 h1:NpiP6xXGOmSi59RlB5gGTB+PtCLldVeK3vCQBJPW0sU=
-go.opentelemetry.io/collector/exporter v0.111.0/go.mod h1:FjO80zGWZjqXil8vM1MS8gyxxzZ29WmChTNV2y9xjHo=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 h1:fpIRPzqsaEtbVip/wsU6h/GMGISo7UjiiYV61MOMEpQ=
-go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0/go.mod h1:NGUTQd1fminFnw289fVQFN4dxdyedK4GTTrJUc9gCtw=
-go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8=
-go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI=
-go.opentelemetry.io/collector/extension/auth v0.111.0 h1:V9DfnMsKdVfsQMeGR5H/nAYHlZnr1Td75kkJOKbCevk=
-go.opentelemetry.io/collector/extension/auth v0.111.0/go.mod h1:4O5JQqEdAWuq4giicIy6DKlgkKTC0qgVEJm44RhviZY=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 h1:kUJSFjm6IQ6nmcJlfSFPvcEO/XeOP9gJY0Qz9O98DKg=
-go.opentelemetry.io/collector/extension/experimental/storage v0.111.0/go.mod h1:qQGvl8Kz2W8b7QywtE8GNqWJMDBo47cjoiIXYuE+/zM=
-go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY=
-go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc=
-go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8=
-go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w=
-go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58=
-go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s=
-go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s=
-go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls=
-go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ=
-go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g=
-go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM=
-go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ=
-go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6tq/2qvbMIKlhG3s=
-go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw=
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM=
-go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
-go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
+go.opentelemetry.io/collector/client v1.19.0 h1:TUal8WV1agTrZStgE7BJ8ZC0IHLGtrfgO9ogU9t1mv8=
+go.opentelemetry.io/collector/client v1.19.0/go.mod h1:jgiXMEM6l8L2QEyf2I/M47Zd8+G7e4z+6H8q5SkHOlQ=
+go.opentelemetry.io/collector/component v0.113.0 h1:/nx+RvZgxUEXP+YcTj69rEtuSEGkfaCyp/ad5zQGLjU=
+go.opentelemetry.io/collector/component v0.113.0/go.mod h1:2T779hIGHU9i7xbXbV3q1/JnRw2FyzUYXW2vq47A6EU=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0 h1:iNlUi+vDo7OqQwg4UOjfqqY3Xram4SkdcSZAyxt6QfE=
+go.opentelemetry.io/collector/component/componentstatus v0.113.0/go.mod h1:NF/hVnP0kcsEqV9x14yMxN9GoehuOaVgMpO+s2KE+28=
+go.opentelemetry.io/collector/config/configauth v0.113.0 h1:CBz43fGpN41MwLdwe3mw/XVSIDvGRMT8aaaPuqKukTU=
+go.opentelemetry.io/collector/config/configauth v0.113.0/go.mod h1:Q8SlxrIvL3FJO51hXa4n9ARvox04lK8mmpjf4b3UNAU=
+go.opentelemetry.io/collector/config/configcompression v1.19.0 h1:bTSjTLhnPXX1NSFM6GzguEM/NBe8QUPsXHc9kMOAJzE=
+go.opentelemetry.io/collector/config/configcompression v1.19.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU=
+go.opentelemetry.io/collector/config/confighttp v0.113.0 h1:a6iO0y1ZM5CPDvwbryzU+GpqAtAQ3eSfNseoAUogw7c=
+go.opentelemetry.io/collector/config/confighttp v0.113.0/go.mod h1:JZ9EwoiWMIrXt5v+d/q54TeUhPdAoLDimSEqTtddW6E=
+go.opentelemetry.io/collector/config/confignet v1.19.0 h1:gEDTd8zLx4pPpG5///XPRpbYUpvKsuQzDdM5IEULY9w=
+go.opentelemetry.io/collector/config/confignet v1.19.0/go.mod h1:o3v4joAEjvLwntqexg5ixMqRrU1+Vst+jWuCUaBNgOg=
+go.opentelemetry.io/collector/config/configopaque v1.19.0 h1:7uvntQeAAtqCaeiS2dDGrT1wLPhWvDlEsD3SliA/koQ=
+go.opentelemetry.io/collector/config/configopaque v1.19.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
+go.opentelemetry.io/collector/config/configretry v1.19.0 h1:DEg8PXpo4ahMYgMzZZUU2cPcDF4vqowZlvimJ/t9InY=
+go.opentelemetry.io/collector/config/configretry v1.19.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0 h1:hweTRrVddnUeA3k7HzRY4oUR9lRdMa7of3mHNUS5YyA=
+go.opentelemetry.io/collector/config/configtelemetry v0.113.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc=
+go.opentelemetry.io/collector/config/configtls v1.19.0 h1:GQ/cF1hgNqHVBq2oSSrOFXxVCyMDyd5kq4R/RMEbL98=
+go.opentelemetry.io/collector/config/configtls v1.19.0/go.mod h1:1hyqnYB3JqEUlk1ME/s9HYz4oCRcxQCRxsJitFFT/cA=
+go.opentelemetry.io/collector/config/internal v0.113.0 h1:9RAzH8v7ItFT1npHpvP0SvUzBHcZDliCGRo9Spp6v7c=
+go.opentelemetry.io/collector/config/internal v0.113.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc=
+go.opentelemetry.io/collector/confmap v1.19.0 h1:TQ0lZpAKqgsE0EKk+u4JA+uBbPYeFRmWP3GH43w40CY=
+go.opentelemetry.io/collector/confmap v1.19.0/go.mod h1:GgNu1ElPGmLn9govqIfjaopvdspw4PJ9KeDtWC4E2Q4=
+go.opentelemetry.io/collector/consumer v0.113.0 h1:KJSiK5vSIY9dgPxwKfQ3gOgKtQsqc+7IB7mGhUAL5c8=
+go.opentelemetry.io/collector/consumer v0.113.0/go.mod h1:zHMlXYFaJlZoLCBR6UwWoyXZ/adcO1u2ydqUal3VmYU=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0 h1:Hd2N7n9RKbnKRaVrdw6fPBoQko5zZIgCxwVxkL6SAIE=
+go.opentelemetry.io/collector/consumer/consumererror v0.113.0/go.mod h1:o0MAGFdzcr7LFTUQ6iivPPhbVmn2ZVIYm3FPXk2+JUo=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 h1:RftAcQUY5UOfbEK4s16jnORqTx16y9+PxA1lQwt98cQ=
+go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0/go.mod h1:ZuHrQ4pWguh6dw0DgTfcUtdY/T+cnOJJNP6LMbm5Y5A=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0 h1:ua2AjNx3DUA8qElXNkggB4w3VDL/rBKBvryOQkhumH8=
+go.opentelemetry.io/collector/consumer/consumertest v0.113.0/go.mod h1:vK8o4ZTZSiG3rVyqxZcCNmT/cvEfx34ig7V65L9+6Rg=
+go.opentelemetry.io/collector/exporter v0.113.0 h1:lDZJ6xfuhyLsT/7lqLhIN/ftA6G+9fuYFtubPFvNDxo=
+go.opentelemetry.io/collector/exporter v0.113.0/go.mod h1:0W4NBf5NjWYxR8oJodmOybgN4O0MLazdJwwHevirvXg=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0 h1:8bsk3wYYNr+WAM5nZkFjiLYSTH9MsY2tm7nUpMWt3qc=
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.113.0/go.mod h1:/HFWF846XePYL/qKDtcEAFgkiGSkLUTaC59A5F48axM=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0 h1:U6cRxjJS7td8iNriUI2QfEdH+Yj60ytyvpmnmKTw0+8=
+go.opentelemetry.io/collector/exporter/exportertest v0.113.0/go.mod h1:SRz5jGyAjtNiWwJ93B1+Ndk1p3oFtQsyLw52UGeyRwc=
+go.opentelemetry.io/collector/extension v0.113.0 h1:Vp/YSL8ZCkJQrP1lf2Bm5yaTvcp6ROO3AnfuSL3GEXM=
+go.opentelemetry.io/collector/extension v0.113.0/go.mod h1:Pwp0TNqdHeER4V1I6H6oCvrto/riiOAqs3737BWCnjw=
+go.opentelemetry.io/collector/extension/auth v0.113.0 h1:4ggRy1vepOabUiCWfU+6M9P/ftXojMUNAvBpeLihYj8=
+go.opentelemetry.io/collector/extension/auth v0.113.0/go.mod h1:VbvAm2YZAqePkWgwn0m0vBaq3aC49CxPVwHmrJ24aeQ=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0 h1:Qq4IaB6bMUrf/bWoPZ5ESWywCt+vDi8I/ChYejIEPcc=
+go.opentelemetry.io/collector/extension/experimental/storage v0.113.0/go.mod h1:BRmo+A7f06u/rhyLauU/Vogk+QRN0y1j2VVVgMGWrfQ=
+go.opentelemetry.io/collector/featuregate v1.19.0 h1:ASea2sU+tdpKI3RxIJC/pufDAfwAmrvcQ4EmTHVu0B0=
+go.opentelemetry.io/collector/featuregate v1.19.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
+go.opentelemetry.io/collector/pdata v1.19.0 h1:jmnU5R8TOCbwRr4B8sjdRxM7L5WnEKlQWX1dtLYxIbE=
+go.opentelemetry.io/collector/pdata v1.19.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0 h1:VRf4p0VhfuaR+Epy/nMIlu/9t39WU9CUgHVUvpuGxfU=
+go.opentelemetry.io/collector/pdata/pprofile v0.113.0/go.mod h1:5aDejksdXh5PdJN/OhpzATGT3kbNL0RMmw2Q0Q6E/o0=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0 h1:vRfn85jicO2F4eOTgsWtzmU/K3E/uZUtM1HEefvvJD8=
+go.opentelemetry.io/collector/pdata/testdata v0.113.0/go.mod h1:sR+6eR+YEJhYZu9StbqzeWcCmHpfBAgX/qjP82HY9Gw=
+go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uFxcm2VO+vhad/eE=
+go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg=
+go.opentelemetry.io/collector/processor v0.113.0 h1:BQI6MsKtiCG9HT/nmiRXTKP6SZFrjFKVfM6pTQfbc0k=
+go.opentelemetry.io/collector/processor v0.113.0/go.mod h1:oX91zMI8ZkoaYSUfUYflHiMiiBJPKtODNBUCrETLLd8=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk=
+go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE=
+go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA=
+go.opentelemetry.io/collector/processor/processortest v0.113.0/go.mod h1:tNg04r5KlpVx774jSC8U/oYst1eb7WIiz+0AjXKJ0Uw=
+go.opentelemetry.io/collector/receiver v0.113.0 h1:vraAbkPy8Pz9x5X39gV+j9t6x23PNsY2aJ6gQMugRbQ=
+go.opentelemetry.io/collector/receiver v0.113.0/go.mod h1:IUa8/lNw8Qh4L5Q3jOeRWKW0ebQPoNcfhytxN5Puq2A=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 h1:uVxuzjGe2t1sbwahSBowVHYnGzpzn8brmfn8z1UHvQg=
+go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0/go.mod h1:khKDkzYJR2x2OPUqGSmoSncdINT9lUE5IThiHPDbqZk=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0 h1:0vOvz3S4Q/KwcNCS9C7zPo0uxD6RSWktG88yGdxfV6g=
+go.opentelemetry.io/collector/receiver/receivertest v0.113.0/go.mod h1:sRq5ctm5UE/0Ar562wnCVQ1zbAie/D127D1WbtbEuEc=
+go.opentelemetry.io/collector/semconv v0.113.0 h1:twenSI7M7MJMJKW8D6a/GXxPZTPbama/weywBtV2iFw=
+go.opentelemetry.io/collector/semconv v0.113.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ=
@@ -596,8 +604,8 @@ google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/test/otel/testdata/builder-config.yaml b/test/otel/testdata/builder-config.yaml
index b465cc10af292..c2262c05307f8 100644
--- a/test/otel/testdata/builder-config.yaml
+++ b/test/otel/testdata/builder-config.yaml
@@ -4,57 +4,57 @@ dist:
output_path: /tmp/otel-ci/otelcol-custom
exporters:
- - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.111.0
- - gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.111.0
- - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0
- - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.111.0
+ - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.113.0
+ - gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.113.0
+ - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.113.0
+ - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.113.0
processors:
- - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.111.0
- - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.111.0
+ - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.113.0
+ - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.113.0
receivers:
- - gomod: go.opentelemetry.io/collector/receiver/nopreceiver v0.111.0
- - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.111.0
+ - gomod: go.opentelemetry.io/collector/receiver/nopreceiver v0.113.0
+ - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.113.0
extensions:
- gomod: github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl v0.60.0
path: ./comp/otelcol/ddflareextension/impl
- - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.111.0
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.111.0
+ - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.113.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.113.0
providers:
- - gomod: go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0
- - gomod: go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0
- - gomod: go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0
- - gomod: go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.17.0
- - gomod: go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0
+ - gomod: go.opentelemetry.io/collector/confmap/provider/envprovider v1.19.0
+ - gomod: go.opentelemetry.io/collector/confmap/provider/fileprovider v1.19.0
+ - gomod: go.opentelemetry.io/collector/confmap/provider/httpprovider v1.19.0
+ - gomod: go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.19.0
+ - gomod: go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.19.0
connectors:
- - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.111.0
+ - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.113.0
diff --git a/test/regression/cases/file_to_blackhole_0ms_latency/experiment.yaml b/test/regression/cases/file_to_blackhole_0ms_latency/experiment.yaml
index 4884b1e7a2964..b445834c940d6 100644
--- a/test/regression/cases/file_to_blackhole_0ms_latency/experiment.yaml
+++ b/test/regression/cases/file_to_blackhole_0ms_latency/experiment.yaml
@@ -36,4 +36,4 @@ checks:
description: "Available bytes not polled by log Agent"
bounds:
series: lost_bytes
- upper_bound: 0KB
+ upper_bound: 0KiB
diff --git a/test/regression/cases/file_to_blackhole_0ms_latency/lading/lading.yaml b/test/regression/cases/file_to_blackhole_0ms_latency/lading/lading.yaml
index bf1e2fdae573c..639cdb3b546aa 100644
--- a/test/regression/cases/file_to_blackhole_0ms_latency/lading/lading.yaml
+++ b/test/regression/cases/file_to_blackhole_0ms_latency/lading/lading.yaml
@@ -5,7 +5,7 @@ generator:
59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131]
load_profile:
constant: 10MB
- concurrent_logs: 8
+ concurrent_logs: 1
maximum_bytes_per_log: 500MB
total_rotations: 5
max_depth: 0
diff --git a/test/regression/cases/file_to_blackhole_1000ms_latency/experiment.yaml b/test/regression/cases/file_to_blackhole_1000ms_latency/experiment.yaml
index 215dcfa86cc48..e19b8685b4079 100644
--- a/test/regression/cases/file_to_blackhole_1000ms_latency/experiment.yaml
+++ b/test/regression/cases/file_to_blackhole_1000ms_latency/experiment.yaml
@@ -31,3 +31,9 @@ checks:
series: total_rss_bytes
# The machine has 12GiB free.
upper_bound: 1.2GiB
+
+ - name: lost_bytes
+ description: "Allowable bytes not polled by log Agent"
+ bounds:
+ series: lost_bytes
+ upper_bound: 0KiB
diff --git a/test/regression/cases/file_to_blackhole_1000ms_latency/lading/lading.yaml b/test/regression/cases/file_to_blackhole_1000ms_latency/lading/lading.yaml
index b5b2447de1b08..0bc009d7a59fd 100644
--- a/test/regression/cases/file_to_blackhole_1000ms_latency/lading/lading.yaml
+++ b/test/regression/cases/file_to_blackhole_1000ms_latency/lading/lading.yaml
@@ -5,7 +5,7 @@ generator:
59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131]
load_profile:
constant: 10MB
- concurrent_logs: 8
+ concurrent_logs: 1
maximum_bytes_per_log: 500MB
total_rotations: 5
max_depth: 0
diff --git a/test/regression/cases/file_to_blackhole_1000ms_latency_linear_load/lading/lading.yaml b/test/regression/cases/file_to_blackhole_1000ms_latency_linear_load/lading/lading.yaml
index b316d44e00fb9..ca47af7871228 100644
--- a/test/regression/cases/file_to_blackhole_1000ms_latency_linear_load/lading/lading.yaml
+++ b/test/regression/cases/file_to_blackhole_1000ms_latency_linear_load/lading/lading.yaml
@@ -12,7 +12,7 @@ generator:
# Agent is not expected to keep up.
initial_bytes_per_second: 10MB
rate: 0.5MB
- concurrent_logs: 8
+ concurrent_logs: 1
maximum_bytes_per_log: 500MB
total_rotations: 5
max_depth: 0
diff --git a/test/regression/cases/file_to_blackhole_100ms_latency/experiment.yaml b/test/regression/cases/file_to_blackhole_100ms_latency/experiment.yaml
index 64fe41015db6f..e19b8685b4079 100644
--- a/test/regression/cases/file_to_blackhole_100ms_latency/experiment.yaml
+++ b/test/regression/cases/file_to_blackhole_100ms_latency/experiment.yaml
@@ -36,4 +36,4 @@ checks:
description: "Allowable bytes not polled by log Agent"
bounds:
series: lost_bytes
- upper_bound: 0KB
+ upper_bound: 0KiB
diff --git a/test/regression/cases/file_to_blackhole_100ms_latency/lading/lading.yaml b/test/regression/cases/file_to_blackhole_100ms_latency/lading/lading.yaml
index bd1b8be967ffc..ebf0840ef0b38 100644
--- a/test/regression/cases/file_to_blackhole_100ms_latency/lading/lading.yaml
+++ b/test/regression/cases/file_to_blackhole_100ms_latency/lading/lading.yaml
@@ -5,7 +5,7 @@ generator:
59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131]
load_profile:
constant: 10MB
- concurrent_logs: 8
+ concurrent_logs: 1
maximum_bytes_per_log: 500MB
total_rotations: 5
max_depth: 0
diff --git a/test/regression/cases/file_to_blackhole_300ms_latency/experiment.yaml b/test/regression/cases/file_to_blackhole_300ms_latency/experiment.yaml
index 215dcfa86cc48..e19b8685b4079 100644
--- a/test/regression/cases/file_to_blackhole_300ms_latency/experiment.yaml
+++ b/test/regression/cases/file_to_blackhole_300ms_latency/experiment.yaml
@@ -31,3 +31,9 @@ checks:
series: total_rss_bytes
# The machine has 12GiB free.
upper_bound: 1.2GiB
+
+ - name: lost_bytes
+ description: "Allowable bytes not polled by log Agent"
+ bounds:
+ series: lost_bytes
+ upper_bound: 0KiB
diff --git a/test/regression/cases/file_to_blackhole_300ms_latency/lading/lading.yaml b/test/regression/cases/file_to_blackhole_300ms_latency/lading/lading.yaml
index e32360a6a9856..dc1b59d89d8a5 100644
--- a/test/regression/cases/file_to_blackhole_300ms_latency/lading/lading.yaml
+++ b/test/regression/cases/file_to_blackhole_300ms_latency/lading/lading.yaml
@@ -5,7 +5,7 @@ generator:
59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131]
load_profile:
constant: 10MB
- concurrent_logs: 8
+ concurrent_logs: 1
maximum_bytes_per_log: 500MB
total_rotations: 5
max_depth: 0
diff --git a/test/regression/cases/file_to_blackhole_500ms_latency/experiment.yaml b/test/regression/cases/file_to_blackhole_500ms_latency/experiment.yaml
index 215dcfa86cc48..e19b8685b4079 100644
--- a/test/regression/cases/file_to_blackhole_500ms_latency/experiment.yaml
+++ b/test/regression/cases/file_to_blackhole_500ms_latency/experiment.yaml
@@ -31,3 +31,9 @@ checks:
series: total_rss_bytes
# The machine has 12GiB free.
upper_bound: 1.2GiB
+
+ - name: lost_bytes
+ description: "Allowable bytes not polled by log Agent"
+ bounds:
+ series: lost_bytes
+ upper_bound: 0KiB
diff --git a/test/regression/cases/file_to_blackhole_500ms_latency/lading/lading.yaml b/test/regression/cases/file_to_blackhole_500ms_latency/lading/lading.yaml
index 87d7548924fcd..2d1b4fa83048b 100644
--- a/test/regression/cases/file_to_blackhole_500ms_latency/lading/lading.yaml
+++ b/test/regression/cases/file_to_blackhole_500ms_latency/lading/lading.yaml
@@ -5,7 +5,7 @@ generator:
59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131]
load_profile:
constant: 10MB
- concurrent_logs: 8
+ concurrent_logs: 1
maximum_bytes_per_log: 500MB
total_rotations: 5
max_depth: 0
diff --git a/tools/ci/docker-login.ps1 b/tools/ci/docker-login.ps1
index 39f03f4fb7632..68724d9adaa92 100644
--- a/tools/ci/docker-login.ps1
+++ b/tools/ci/docker-login.ps1
@@ -1,7 +1,7 @@
$ErrorActionPreference = "Stop"
# ECR Login
$AWS_ECR_PASSWORD = (aws ecr get-login-password --region us-east-1)
-docker login --username AWS --password "${AWS_ECR_PASSWORD}" 486234852809.dkr.ecr.us-east-1.amazonaws.com
+powershell -Command "$(Get-Location)\tools\ci\retry.ps1 docker login --username AWS --password ${AWS_ECR_PASSWORD} 486234852809.dkr.ecr.us-east-1.amazonaws.com"
If ($lastExitCode -ne "0") {
throw "Previous command returned $lastExitCode"
}
@@ -20,7 +20,7 @@ If ($lastExitCode -ne "0") {
}
$DOCKER_REGISTRY_PWD = $(cat "$tmpfile")
Remove-Item "$tmpfile"
-docker login --username "${DOCKER_REGISTRY_LOGIN}" --password "${DOCKER_REGISTRY_PWD}" "docker.io"
+powershell -Command "$(Get-Location)\tools\ci\retry.ps1 docker login --username ${DOCKER_REGISTRY_LOGIN} --password ${DOCKER_REGISTRY_PWD} docker.io"
If ($lastExitCode -ne "0") {
throw "Previous command returned $lastExitCode"
}
diff --git a/tools/ci/fetch_secret.sh b/tools/ci/fetch_secret.sh
index dadbe93867331..178bd867a399d 100755
--- a/tools/ci/fetch_secret.sh
+++ b/tools/ci/fetch_secret.sh
@@ -9,7 +9,11 @@ set +x
while [[ $retry_count -lt $max_retries ]]; do
if [ -n "$parameter_field" ]; then
- result=$(vault kv get -field="${parameter_field}" kv/k8s/gitlab-runner/datadog-agent/"${parameter_name}" 2> errorFile)
+ vault_name="kv/k8s/gitlab-runner/datadog-agent"
+ if [[ "$(uname -s)" == "Darwin" ]]; then
+ vault_name="kv/aws/arn:aws:iam::486234852809:role/ci-datadog-agent"
+ fi
+ result=$(vault kv get -field="${parameter_field}" "${vault_name}"/"${parameter_name}" 2> errorFile)
else
result=$(aws ssm get-parameter --region us-east-1 --name "$parameter_name" --with-decryption --query "Parameter.Value" --output text 2> errorFile)
fi